Example #1
def _add_comparison_summary(gan_model, reconstructions):
  image_list = (array_ops.unstack(gan_model.generator_inputs[:1]) +
                array_ops.unstack(gan_model.generated_data[:1]) +
                array_ops.unstack(reconstructions[:1]))
  summary.image(
      'image_comparison', eval_utils.image_reshaper(
          image_list, num_cols=len(image_list)), max_outputs=1)
Example #2
def add_gan_model_image_summaries(gan_model, grid_size=10):
    """Adds image summaries for real and fake images.

  Args:
    gan_model: A GANModel tuple.
    grid_size: The size of an image grid.

  Raises:
    ValueError: If real and generated data aren't images.
  """
    _assert_is_image(gan_model.real_data)
    _assert_is_image(gan_model.generated_data)

    num_images = grid_size**2
    real_image_shape = gan_model.real_data.shape.as_list()[1:3]
    generated_image_shape = gan_model.generated_data.shape.as_list()[1:3]
    real_channels = gan_model.real_data.shape.as_list()[3]
    generated_channels = gan_model.generated_data.shape.as_list()[3]

    summary.image('real_data',
                  eval_utils.image_grid(gan_model.real_data[:num_images],
                                        grid_shape=(grid_size, grid_size),
                                        image_shape=real_image_shape,
                                        num_channels=real_channels),
                  max_outputs=1)
    summary.image('generated_data',
                  eval_utils.image_grid(gan_model.generated_data[:num_images],
                                        grid_shape=(grid_size, grid_size),
                                        image_shape=generated_image_shape,
                                        num_channels=generated_channels),
                  max_outputs=1)
    add_gan_model_summaries(gan_model)
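For orientation, a minimal usage sketch of the helper above, assuming the TF 1.x `tf.contrib.gan` API; the toy generator, discriminator, and data tensors are illustrative stand-ins, not part of the original example.

# Hedged usage sketch (TF 1.x, tf.contrib.gan); all model pieces are toy
# stand-ins.
import tensorflow as tf

tfgan = tf.contrib.gan

def generator_fn(noise):
  # Toy generator: project noise to a 32x32x3 image.
  return tf.reshape(tf.layers.dense(noise, 32 * 32 * 3), [-1, 32, 32, 3])

def discriminator_fn(img, unused_conditioning):
  # Toy discriminator: one logit per image.
  return tf.layers.dense(tf.layers.flatten(img), 1)

images = tf.zeros([16, 32, 32, 3])  # stand-in for real training images
noise = tf.random_normal([16, 64])

gan_model = tfgan.gan_model(generator_fn, discriminator_fn,
                            real_data=images, generator_inputs=noise)

# Adds 'real_data' and 'generated_data' 4x4 image grids for TensorBoard.
tfgan.eval.add_gan_model_image_summaries(gan_model, grid_size=4)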
Example #3
    def set_model(self, model):
        self.model = model
        self.sess = K.get_session()
        if self.histogram_freq and self.merged is None:
            for layer in self.model.layers:

                for weight in layer.weights:
                    tf_summary.histogram(weight.name, weight)
                    if self.write_images:
                        w_img = array_ops.squeeze(weight)
                        shape = w_img.get_shape()
                        if len(shape) > 1 and shape[0] > shape[1]:
                            w_img = array_ops.transpose(w_img)
                        if len(shape) == 1:
                            w_img = array_ops.expand_dims(w_img, 0)
                        w_img = array_ops.expand_dims(
                            array_ops.expand_dims(w_img, 0), -1)
                        tf_summary.image(weight.name, w_img)

                if hasattr(layer, 'output'):
                    tf_summary.histogram('{}_out'.format(layer.name),
                                         layer.output)
        self.merged = tf_summary.merge_all()

        if self.write_graph:
            self.writer = tf_summary.FileWriter(self.log_dir, self.sess.graph)
        else:
            self.writer = tf_summary.FileWriter(self.log_dir)
Example #4
  def set_model(self, model):
    self.model = model
    self.sess = K.get_session()
    if self.histogram_freq and self.merged is None:
      for layer in self.model.layers:

        for weight in layer.weights:
          tf_summary.histogram(weight.name, weight)
          if self.write_images:
            w_img = array_ops.squeeze(weight)
            shape = w_img.get_shape()
            if len(shape) > 1 and shape[0] > shape[1]:
              w_img = array_ops.transpose(w_img)
            if len(shape) == 1:
              w_img = array_ops.expand_dims(w_img, 0)
            w_img = array_ops.expand_dims(array_ops.expand_dims(w_img, 0), -1)
            tf_summary.image(weight.name, w_img)

        if hasattr(layer, 'output'):
          tf_summary.histogram('{}_out'.format(layer.name), layer.output)
    self.merged = tf_summary.merge_all()

    if self.write_graph:
      self.writer = tf_summary.FileWriter(self.log_dir, self.sess.graph)
    else:
      self.writer = tf_summary.FileWriter(self.log_dir)
Example #6
    def set_model(self, model):
        self.model = model
        self.sess = K.get_session()
        if self.histogram_freq and self.merged is None:
            for layer in self.model.layers:
                for weight in layer.weights:
                    mapped_weight_name = weight.name.replace(':', '_')
                    tf_summary.histogram(mapped_weight_name, weight)
                    if self.write_grads:
                        grads = model.optimizer.get_gradients(
                            model.total_loss, weight)

                        def is_indexed_slices(grad):
                            return type(grad).__name__ == 'IndexedSlices'

                        grads = [
                            grad.values if is_indexed_slices(grad) else grad
                            for grad in grads
                        ]
                        tf_summary.histogram(
                            '{}_grad'.format(mapped_weight_name), grads)
                    if self.write_images:
                        w_img = array_ops.squeeze(weight)
                        shape = K.int_shape(w_img)
                        if len(shape) == 2:  # dense layer kernel case
                            if shape[0] > shape[1]:
                                w_img = array_ops.transpose(w_img)
                                shape = K.int_shape(w_img)
                            w_img = array_ops.reshape(
                                w_img, [1, shape[0], shape[1], 1])
                        elif len(shape) == 3:  # convnet case
                            if K.image_data_format() == 'channels_last':
                                # switch to channels_first to display
                                # every kernel as a separate image
                                w_img = array_ops.transpose(w_img,
                                                            perm=[2, 0, 1])
                                shape = K.int_shape(w_img)
                            w_img = array_ops.reshape(
                                w_img, [shape[0], shape[1], shape[2], 1])
                        elif len(shape) == 1:  # bias case
                            w_img = array_ops.reshape(w_img,
                                                      [1, shape[0], 1, 1])
                        else:
                            # not possible to handle 3D convnets etc.
                            continue

                        shape = K.int_shape(w_img)
                        assert len(shape) == 4 and shape[-1] in [1, 3, 4]
                        tf_summary.image(mapped_weight_name, w_img)

                if hasattr(layer, 'output'):
                    tf_summary.histogram('{}_out'.format(layer.name),
                                         layer.output)
        self.merged = tf_summary.merge_all()

        if self.write_graph:
            self.writer = tf_summary.FileWriter(self.log_dir, self.sess.graph)
        else:
            self.writer = tf_summary.FileWriter(self.log_dir)
Example #7
def add_image_comparison_summaries(gan_model, num_comparisons=2,
                                   display_diffs=False):
  """Adds image summaries to compare triplets of images.

  The first image is the generator input, the second is the generator output,
  and the third is the real data. This style of comparison is useful for
  image translation problems, where the generator input is a corrupted image,
  the generator output is the reconstruction, and the real data is the target.

  Args:
    gan_model: A GANModel tuple.
    num_comparisons: The number of image triplets to display.
    display_diffs: Also display the difference between generated and target.

  Raises:
    ValueError: If real data, generated data, and generator inputs aren't
      images.
    ValueError: If the generator input, real, and generated data aren't all the
      same size.
  """
  if isinstance(gan_model, namedtuples.CycleGANModel):
    saved_params = locals()
    saved_params.pop('gan_model', None)
    with ops.name_scope('cyclegan_x2y_image_comparison_summaries'):
      add_image_comparison_summaries(gan_model.model_x2y, **saved_params)
    with ops.name_scope('cyclegan_y2x_image_comparison_summaries'):
      add_image_comparison_summaries(gan_model.model_y2x, **saved_params)
    return

  _assert_is_image(gan_model.generator_inputs)
  _assert_is_image(gan_model.generated_data)
  _assert_is_image(gan_model.real_data)

  gan_model.generated_data.shape.assert_is_compatible_with(
      gan_model.generator_inputs.shape)
  gan_model.real_data.shape.assert_is_compatible_with(
      gan_model.generated_data.shape)

  image_list = []
  image_list.extend(
      array_ops.unstack(gan_model.generator_inputs[:num_comparisons]))
  image_list.extend(
      array_ops.unstack(gan_model.generated_data[:num_comparisons]))
  image_list.extend(array_ops.unstack(gan_model.real_data[:num_comparisons]))
  if display_diffs:
    generated_list = array_ops.unstack(
        gan_model.generated_data[:num_comparisons])
    real_list = array_ops.unstack(gan_model.real_data[:num_comparisons])
    diffs = [
        math_ops.abs(math_ops.to_float(generated) - math_ops.to_float(real))
        for generated, real in zip(generated_list, real_list)]
    image_list.extend(diffs)

  # Reshape image and display.
  summary.image(
      'image_comparison',
      eval_utils.image_reshaper(image_list, num_cols=num_comparisons),
      max_outputs=1)
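Again as a hedged sketch (assuming `tf.contrib.gan` on TF 1.x): the comparison summary requires the generator inputs to be images themselves, as in an image-translation setup; the model pieces below are toy stand-ins.

# Hedged sketch: image-comparison summaries need image-valued generator
# inputs (image translation).
import tensorflow as tf

tfgan = tf.contrib.gan

def generator_fn(img):
  return tf.layers.conv2d(img, 3, 1)  # toy "translator": a 1x1 convolution

def discriminator_fn(img, unused_conditioning):
  return tf.layers.dense(tf.layers.flatten(img), 1)

corrupted = tf.zeros([4, 32, 32, 3])  # generator inputs (images)
targets = tf.zeros([4, 32, 32, 3])    # real data

gan_model = tfgan.gan_model(generator_fn, discriminator_fn,
                            real_data=targets, generator_inputs=corrupted)

# Grid rows: inputs, generated, real data, and (optionally) diffs;
# one comparison per column.
tfgan.eval.add_image_comparison_summaries(
    gan_model, num_comparisons=2, display_diffs=True)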
Example #8
  def set_model(self, model):
    """Sets Keras model and creates summary ops."""

    self.model = model
    self.sess = K.get_session()
    # only make histogram summary op if it hasn't already been made
    if self.histogram_freq and self.merged is None:
      for layer in self.model.layers:
        for weight in layer.weights:
          mapped_weight_name = weight.name.replace(':', '_')
          tf_summary.histogram(mapped_weight_name, weight)
          if self.write_images:
            w_img = array_ops.squeeze(weight)
            shape = K.int_shape(w_img)
            if len(shape) == 2:  # dense layer kernel case
              if shape[0] > shape[1]:
                w_img = array_ops.transpose(w_img)
                shape = K.int_shape(w_img)
              w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
            elif len(shape) == 3:  # convnet case
              if K.image_data_format() == 'channels_last':
                # switch to channels_first to display
                # every kernel as a separate image
                w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
                shape = K.int_shape(w_img)
              w_img = array_ops.reshape(w_img,
                                        [shape[0], shape[1], shape[2], 1])
            elif len(shape) == 1:  # bias case
              w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
            else:
              # not possible to handle 3D convnets etc.
              continue

            shape = K.int_shape(w_img)
            assert len(shape) == 4 and shape[-1] in [1, 3, 4]
            tf_summary.image(mapped_weight_name, w_img)

        if self.write_grads:
          for weight in layer.trainable_weights:
            mapped_weight_name = weight.name.replace(':', '_')
            grads = model.optimizer.get_gradients(model.total_loss, weight)

            def is_indexed_slices(grad):
              return type(grad).__name__ == 'IndexedSlices'

            grads = [grad.values if is_indexed_slices(grad) else grad
                     for grad in grads]
            tf_summary.histogram('{}_grad'.format(mapped_weight_name), grads)

        if hasattr(layer, 'output'):
          tf_summary.histogram('{}_out'.format(layer.name), layer.output)
    self.merged = tf_summary.merge_all()

    if self.write_graph:
      self.writer = self._writer_class(self.log_dir, self.sess.graph)
    else:
      self.writer = self._writer_class(self.log_dir)
Example #9
    def setUp(self):
        self.log_dir = tempfile.mkdtemp()

        # We use numpy.random to generate images. We seed to avoid non-determinism
        # in this test.
        numpy.random.seed(42)

        # Create image summaries for run foo.
        ops.reset_default_graph()
        sess = session.Session()
        placeholder = array_ops.placeholder(dtypes.uint8)
        summary.image(name="baz", tensor=placeholder)
        merged_summary_op = summary.merge_all()
        foo_directory = os.path.join(self.log_dir, "foo")
        writer = summary.FileWriter(foo_directory)
        writer.add_graph(sess.graph)
        for step in xrange(2):
            writer.add_summary(sess.run(merged_summary_op,
                                        feed_dict={
                                            placeholder:
                                            (numpy.random.rand(1, 16, 42, 3) *
                                             255).astype(numpy.uint8)
                                        }),
                               global_step=step)
        writer.close()

        # Create image summaries for run bar.
        ops.reset_default_graph()
        sess = session.Session()
        placeholder = array_ops.placeholder(dtypes.uint8)
        summary.image(name="quux", tensor=placeholder)
        merged_summary_op = summary.merge_all()
        bar_directory = os.path.join(self.log_dir, "bar")
        writer = summary.FileWriter(bar_directory)
        writer.add_graph(sess.graph)
        for step in xrange(2):
            writer.add_summary(sess.run(merged_summary_op,
                                        feed_dict={
                                            placeholder:
                                            (numpy.random.rand(1, 6, 8, 3) *
                                             255).astype(numpy.uint8)
                                        }),
                               global_step=step)
        writer.close()

        # Start a server with the plugin.
        multiplexer = event_multiplexer.EventMultiplexer({
            "foo": foo_directory,
            "bar": bar_directory,
        })
        plugin = images_plugin.ImagesPlugin()
        wsgi_app = application.TensorBoardWSGIApp(self.log_dir, [plugin],
                                                  multiplexer,
                                                  reload_interval=0)
        self.server = werkzeug_test.Client(wsgi_app, wrappers.BaseResponse)
        self.routes = plugin.get_plugin_apps(multiplexer, self.log_dir)
Example #10
  def _make_histogram_ops(self, model):
    """Defines histogram ops when histogram_freq > 0."""
    # only make histogram summary op if it hasn't already been made
    if self.histogram_freq and self.merged is None:
      for layer in self.model.layers:
        for weight in layer.weights:
          mapped_weight_name = weight.name.replace(':', '_')
          tf_summary.histogram(mapped_weight_name, weight)
          if self.write_images:
            w_img = array_ops.squeeze(weight)
            shape = K.int_shape(w_img)
            if len(shape) == 2:  # dense layer kernel case
              if shape[0] > shape[1]:
                w_img = array_ops.transpose(w_img)
                shape = K.int_shape(w_img)
              w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
            elif len(shape) == 3:  # convnet case
              if K.image_data_format() == 'channels_last':
                # switch to channels_first to display
                # every kernel as a separate image
                w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
                shape = K.int_shape(w_img)
              w_img = array_ops.reshape(w_img,
                                        [shape[0], shape[1], shape[2], 1])
            elif len(shape) == 1:  # bias case
              w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
            else:
              # not possible to handle 3D convnets etc.
              continue

            shape = K.int_shape(w_img)
            assert len(shape) == 4 and shape[-1] in [1, 3, 4]
            tf_summary.image(mapped_weight_name, w_img)

        if self.write_grads:
          for weight in layer.trainable_weights:
            mapped_weight_name = weight.name.replace(':', '_')
            grads = model.optimizer.get_gradients(model.total_loss, weight)

            def is_indexed_slices(grad):
              return type(grad).__name__ == 'IndexedSlices'

            grads = [
                grad.values if is_indexed_slices(grad) else grad
                for grad in grads
            ]
            tf_summary.histogram('{}_grad'.format(mapped_weight_name), grads)

        if hasattr(layer, 'output'):
          if isinstance(layer.output, list):
            for i, output in enumerate(layer.output):
              tf_summary.histogram('{}_out_{}'.format(layer.name, i), output)
          else:
            tf_summary.histogram('{}_out'.format(layer.name), layer.output)
Example #11
def add_image_comparison_summaries(gan_model,
                                   num_comparisons=2,
                                   display_diffs=False):
    """Adds image summaries to compare triplets of images.

  The first image is the generator input, the second is the generator output,
  and the third is the real data. This style of comparison is useful for
  image translation problems, where the generator input is a corrupted image,
  the generator output is the reconstruction, and the real data is the target.

  Args:
    gan_model: A GANModel tuple.
    num_comparisons: The number of image triplets to display.
    display_diffs: Also display the difference between generated and target.

  Raises:
    ValueError: If real data, generated data, and generator inputs aren't
      images.
    ValueError: If the generator input, real, and generated data aren't all the
      same size.
  """
    _assert_is_image(gan_model.generator_inputs)
    _assert_is_image(gan_model.generated_data)
    _assert_is_image(gan_model.real_data)

    gan_model.generated_data.shape.assert_is_compatible_with(
        gan_model.generator_inputs.shape)
    gan_model.real_data.shape.assert_is_compatible_with(
        gan_model.generated_data.shape)

    image_list = []
    image_list.extend(
        array_ops.unstack(gan_model.generator_inputs[:num_comparisons]))
    image_list.extend(
        array_ops.unstack(gan_model.generated_data[:num_comparisons]))
    image_list.extend(array_ops.unstack(gan_model.real_data[:num_comparisons]))
    if display_diffs:
        generated_list = array_ops.unstack(
            gan_model.generated_data[:num_comparisons])
        real_list = array_ops.unstack(gan_model.real_data[:num_comparisons])
        diffs = [
            math_ops.abs(
                math_ops.cast(generated, dtypes.float32) -
                math_ops.cast(real, dtypes.float32))
            for generated, real in zip(generated_list, real_list)
        ]
        image_list.extend(diffs)

    # Reshape image and display.
    summary.image('image_comparison',
                  eval_utils.image_reshaper(image_list,
                                            num_cols=num_comparisons),
                  max_outputs=1)
Example #12
def add_gan_model_image_summaries(gan_model, grid_size=4, model_summaries=True):
  """Adds image summaries for real and fake images.

  Args:
    gan_model: A GANModel tuple.
    grid_size: The size of an image grid.
    model_summaries: Also add summaries of the model.

  Raises:
    ValueError: If real and generated data aren't images.
  """
  if isinstance(gan_model, namedtuples.CycleGANModel):
    saved_params = locals()
    saved_params.pop('gan_model', None)
    with ops.name_scope('cyclegan_x2y_image_summaries'):
      add_gan_model_image_summaries(gan_model.model_x2y, **saved_params)
    with ops.name_scope('cyclegan_y2x_image_summaries'):
      add_gan_model_image_summaries(gan_model.model_y2x, **saved_params)
    return

  _assert_is_image(gan_model.real_data)
  _assert_is_image(gan_model.generated_data)

  num_images = grid_size ** 2
  real_image_shape = gan_model.real_data.shape.as_list()[1:3]
  generated_image_shape = gan_model.generated_data.shape.as_list()[1:3]
  real_channels = gan_model.real_data.shape.as_list()[3]
  generated_channels = gan_model.generated_data.shape.as_list()[3]

  summary.image(
      'real_data',
      eval_utils.image_grid(
          gan_model.real_data[:num_images],
          grid_shape=(grid_size, grid_size),
          image_shape=real_image_shape,
          num_channels=real_channels),
      max_outputs=1)
  summary.image(
      'generated_data',
      eval_utils.image_grid(
          gan_model.generated_data[:num_images],
          grid_shape=(grid_size, grid_size),
          image_shape=generated_image_shape,
          num_channels=generated_channels),
      max_outputs=1)

  if model_summaries:
    add_gan_model_summaries(gan_model)
Example #13
  def testImageSummary(self):
    for depth in (1, 3, 4):
      for positive in False, True:
        with self.session(graph=ops.Graph()) as sess:
          shape = (4, 5, 7) + (depth,)
          bad_color = [255, 0, 0, 255][:depth]
          # Build a mostly random image with one nan
          const = np.random.randn(*shape).astype(np.float32)
          const[0, 1, 2] = 0  # Make the nan entry not the max
          if positive:
            const = 1 + np.maximum(const, 0)
            scale = 255 / const.reshape(4, -1).max(axis=1)
            offset = 0
          else:
            scale = 127 / np.abs(const.reshape(4, -1)).max(axis=1)
            offset = 128
          adjusted = np.floor(scale[:, None, None, None] * const + offset)
          const[0, 1, 2, depth // 2] = np.nan

          # Summarize
          summ = summary.image("img", const)
          value = sess.run(summ)
          self.assertEqual([], summ.get_shape())
          image_summ = self._AsSummary(value)

          # Decode the first image and check consistency
          image = image_ops.decode_png(image_summ.value[0]
                                       .image.encoded_image_string).eval()
          self.assertAllEqual(image[1, 2], bad_color)
          image[1, 2] = adjusted[0, 1, 2]
          self.assertAllClose(image, adjusted[0], rtol=2e-5, atol=2e-5)

          # Check the rest of the proto
          self._CheckProto(image_summ, shape)
Example #14
  def testImageSummary(self):
    np.random.seed(7)
    for depth in (1, 3, 4):
      for positive in False, True:
        with self.test_session(graph=ops.Graph()) as sess:
          shape = (4, 5, 7) + (depth,)
          bad_color = [255, 0, 0, 255][:depth]
          # Build a mostly random image with one nan
          const = np.random.randn(*shape).astype(np.float32)
          const[0, 1, 2] = 0  # Make the nan entry not the max
          if positive:
            const = 1 + np.maximum(const, 0)
            scale = 255 / const.reshape(4, -1).max(axis=1)
            offset = 0
          else:
            scale = 127 / np.abs(const.reshape(4, -1)).max(axis=1)
            offset = 128
          adjusted = np.floor(scale[:, None, None, None] * const + offset)
          const[0, 1, 2, depth // 2] = np.nan

          # Summarize
          summ = summary.image("img", const)
          value = sess.run(summ)
          self.assertEqual([], summ.get_shape())
          image_summ = self._AsSummary(value)

          # Decode the first image and check consistency
          image = image_ops.decode_png(image_summ.value[0]
                                       .image.encoded_image_string).eval()
          self.assertAllEqual(image[1, 2], bad_color)
          image[1, 2] = adjusted[0, 1, 2]
          self.assertAllClose(image, adjusted[0], rtol=2e-5, atol=2e-5)

          # Check the rest of the proto
          self._CheckProto(image_summ, shape)
Example #15
  def testImageSummaryUint8(self):
    np.random.seed(7)
    for depth in (1, 3, 4):
      with self.session(graph=ops.Graph()) as sess:
        shape = (4, 5, 7) + (depth,)

        # Build a random uint8 image
        images = np.random.randint(256, size=shape).astype(np.uint8)
        tf_images = ops.convert_to_tensor(images)
        self.assertEqual(tf_images.dtype, dtypes.uint8)

        # Summarize
        summ = summary.image("img", tf_images)
        value = sess.run(summ)
        self.assertEqual([], summ.get_shape())
        image_summ = self._AsSummary(value)

        # Decode the first image and check consistency.
        # Since we're uint8, everything should be exact.
        image = image_ops.decode_png(image_summ.value[0]
                                     .image.encoded_image_string).eval()
        self.assertAllEqual(image, images[0])

        # Check the rest of the proto
        self._CheckProto(image_summ, shape)
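The exactness check above works because PNG encoding is lossless for uint8 data. A tiny standalone illustration of that round trip (TF 1.x graph mode; shapes are arbitrary):

# uint8 image data survives a PNG encode/decode round trip exactly.
import numpy as np
import tensorflow as tf

img = np.random.randint(256, size=(4, 4, 3)).astype(np.uint8)
decoded = tf.image.decode_png(tf.image.encode_png(img))
with tf.Session() as sess:
  np.testing.assert_array_equal(sess.run(decoded), img)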
Example #16
def add_image_summary(tensor, name=None, prefix=None, print_summary=False):
    """Adds an image summary for the given tensor.

  Args:
    tensor: a variable or op tensor with shape [batch,height,width,channels]
    name: the optional name for the summary.
    prefix: An optional prefix for the summary names.
    print_summary: If `True`, the summary is printed to stdout when the summary
      is computed.

  Returns:
    An image `Tensor` of type `string` whose contents are the serialized
    `Summary` protocol buffer.
  """
    summary_name = _get_summary_name(tensor, name, prefix)
    # If print_summary, then we need to make sure that this call doesn't add the
    # non-printing op to the collection. We'll add it to the collection later.
    collections = [] if print_summary else None
    op = summary.image(name=summary_name,
                       tensor=tensor,
                       collections=collections)
    if print_summary:
        op = logging_ops.Print(op, [tensor], summary_name)
        ops.add_to_collection(ops.GraphKeys.SUMMARIES, op)
    return op
Example #17
  def testImageSummaryUint8(self):
    np.random.seed(7)
    for depth in (1, 3, 4):
      with self.test_session(graph=ops.Graph()) as sess:
        shape = (4, 5, 7) + (depth,)

        # Build a random uint8 image
        images = np.random.randint(256, size=shape).astype(np.uint8)
        tf_images = ops.convert_to_tensor(images)
        self.assertEqual(tf_images.dtype, dtypes.uint8)

        # Summarize
        summ = summary.image("img", tf_images)
        value = sess.run(summ)
        self.assertEqual([], summ.get_shape())
        image_summ = self._AsSummary(value)

        # Decode the first image and check consistency.
        # Since we're uint8, everything should be exact.
        image = image_ops.decode_png(image_summ.value[0]
                                     .image.encoded_image_string).eval()
        self.assertAllEqual(image, images[0])

        # Check the rest of the proto
        self._CheckProto(image_summ, shape)
Example #18
def add_gan_model_image_summaries(gan_model,
                                  grid_size=4,
                                  model_summaries=True):
    """Adds image summaries for real and fake images.

  Args:
    gan_model: A GANModel tuple.
    grid_size: The size of an image grid.
    model_summaries: Also add summaries of the model.

  Raises:
    ValueError: If real and generated data aren't images.
  """
    if isinstance(gan_model, namedtuples.CycleGANModel):
        saved_params = locals()
        saved_params.pop('gan_model', None)
        with ops.name_scope('cyclegan_x2y_image_summaries'):
            add_gan_model_image_summaries(gan_model.model_x2y, **saved_params)
        with ops.name_scope('cyclegan_y2x_image_summaries'):
            add_gan_model_image_summaries(gan_model.model_y2x, **saved_params)
        return

    _assert_is_image(gan_model.real_data)
    _assert_is_image(gan_model.generated_data)

    num_images = grid_size**2
    real_image_shape = gan_model.real_data.shape.as_list()[1:3]
    generated_image_shape = gan_model.generated_data.shape.as_list()[1:3]
    real_channels = gan_model.real_data.shape.as_list()[3]
    generated_channels = gan_model.generated_data.shape.as_list()[3]

    summary.image('real_data',
                  eval_utils.image_grid(gan_model.real_data[:num_images],
                                        grid_shape=(grid_size, grid_size),
                                        image_shape=real_image_shape,
                                        num_channels=real_channels),
                  max_outputs=1)
    summary.image('generated_data',
                  eval_utils.image_grid(gan_model.generated_data[:num_images],
                                        grid_shape=(grid_size, grid_size),
                                        image_shape=generated_image_shape,
                                        num_channels=generated_channels),
                  max_outputs=1)

    if model_summaries:
        add_gan_model_summaries(gan_model)
Example #20
def add_gan_model_image_summaries(gan_model, grid_size=4, model_summaries=True):
  """Adds image summaries for real and fake images.

  Args:
    gan_model: A GANModel tuple.
    grid_size: The size of an image grid.
    model_summaries: Also add summaries of the model.

  Raises:
    ValueError: If real and generated data aren't images.
  """
  if isinstance(gan_model, namedtuples.CycleGANModel):
    raise ValueError(
        '`add_gan_model_image_summaries` does not take CycleGANModels. Please '
        'use `add_cyclegan_image_summaries` instead.')
  _assert_is_image(gan_model.real_data)
  _assert_is_image(gan_model.generated_data)

  num_images = grid_size ** 2
  real_image_shape = gan_model.real_data.shape.as_list()[1:3]
  generated_image_shape = gan_model.generated_data.shape.as_list()[1:3]
  real_channels = gan_model.real_data.shape.as_list()[3]
  generated_channels = gan_model.generated_data.shape.as_list()[3]

  summary.image(
      'real_data',
      eval_utils.image_grid(
          gan_model.real_data[:num_images],
          grid_shape=(grid_size, grid_size),
          image_shape=real_image_shape,
          num_channels=real_channels),
      max_outputs=1)
  summary.image(
      'generated_data',
      eval_utils.image_grid(
          gan_model.generated_data[:num_images],
          grid_shape=(grid_size, grid_size),
          image_shape=generated_image_shape,
          num_channels=generated_channels),
      max_outputs=1)

  if model_summaries:
    add_gan_model_summaries(gan_model)
Example #21
    def testTFSummaryImage(self):
        """Verify processing of tf.summary.image."""
        event_sink = _EventGenerator(self, zero_out_timestamps=True)
        writer = SummaryToEventTransformer(event_sink)
        with self.test_session() as sess:
            ipt = array_ops.ones([10, 4, 4, 3], dtypes.uint8)
            # This is an interesting example because the old tf.image_summary
            # op would have raised a tag-reuse error here. Using the tf node
            # name instead allows the same arguments to be reused across the
            # image summaries.
            with ops.name_scope('1'):
                summary_lib.image('images', ipt, max_outputs=1)
            with ops.name_scope('2'):
                summary_lib.image('images', ipt, max_outputs=2)
            with ops.name_scope('3'):
                summary_lib.image('images', ipt, max_outputs=3)
            merged = summary_lib.merge_all()
            writer.add_graph(sess.graph)
            for i in xrange(10):
                summ = sess.run(merged)
                writer.add_summary(summ, global_step=i)

        accumulator = ea.EventAccumulator(event_sink)
        accumulator.Reload()

        tags = [
            u'1/images/image', u'2/images/image/0', u'2/images/image/1',
            u'3/images/image/0', u'3/images/image/1', u'3/images/image/2'
        ]

        self.assertTagsEqual(accumulator.Tags(), {
            ea.IMAGES: tags,
            ea.GRAPH: True,
            ea.META_GRAPH: False,
        })
Example #22
  def testTFSummaryImage(self):
    """Verify processing of tf.summary.image."""
    event_sink = _EventGenerator(self, zero_out_timestamps=True)
    writer = SummaryToEventTransformer(event_sink)
    with self.test_session() as sess:
      ipt = array_ops.ones([10, 4, 4, 3], dtypes.uint8)
      # This is an interesting example because the old tf.image_summary op
      # would have raised a tag-reuse error here. Using the tf node name
      # instead allows the same arguments to be reused across the image
      # summaries.
      with ops.name_scope('1'):
        summary_lib.image('images', ipt, max_outputs=1)
      with ops.name_scope('2'):
        summary_lib.image('images', ipt, max_outputs=2)
      with ops.name_scope('3'):
        summary_lib.image('images', ipt, max_outputs=3)
      merged = summary_lib.merge_all()
      writer.add_graph(sess.graph)
      for i in xrange(10):
        summ = sess.run(merged)
        writer.add_summary(summ, global_step=i)

    accumulator = ea.EventAccumulator(event_sink)
    accumulator.Reload()

    tags = [
        u'1/images/image', u'2/images/image/0', u'2/images/image/1',
        u'3/images/image/0', u'3/images/image/1', u'3/images/image/2'
    ]

    self.assertTagsEqual(accumulator.Tags(), {
        ea.IMAGES: tags,
        ea.GRAPH: True,
        ea.META_GRAPH: False,
    })
Example #23
  def testImageSummary(self):
    with self.test_session() as s:
      i = array_ops.ones((5, 4, 4, 3))
      with ops.name_scope('outer'):
        im = summary_lib.image('inner', i, max_outputs=3)
      summary_str = s.run(im)
    summary = summary_pb2.Summary()
    summary.ParseFromString(summary_str)
    values = summary.value
    self.assertEqual(len(values), 3)
    tags = sorted(v.tag for v in values)
    expected = sorted('outer/inner/image/{}'.format(i) for i in xrange(3))
    self.assertEqual(tags, expected)
Example #24
    def build(self, img_a, img_b, labels):
        self.__call__(img_a, img_b, labels)

        # Add summaries
        num_images = self.grid_size**2
        image_shape = self.img_a.get_shape().as_list()[1:3]

        summary.image('image_a',
                      eval_utils.image_grid(self.img_a[:num_images, :, :, :3],
                                            grid_shape=(self.grid_size,
                                                        self.grid_size),
                                            image_shape=image_shape,
                                            num_channels=3),
                      max_outputs=1)

        summary.image('image_b',
                      eval_utils.image_grid(self.img_b[:num_images, :, :, :3],
                                            grid_shape=(self.grid_size,
                                                        self.grid_size),
                                            image_shape=image_shape,
                                            num_channels=3),
                      max_outputs=1)
Example #25
  def testImageSummary(self):
    with self.cached_session() as s:
      i = array_ops.ones((5, 4, 4, 3))
      with ops.name_scope('outer'):
        im = summary_lib.image('inner', i, max_outputs=3)
      summary_str = s.run(im)
    summary = summary_pb2.Summary()
    summary.ParseFromString(summary_str)
    values = summary.value
    self.assertEqual(len(values), 3)
    tags = sorted(v.tag for v in values)
    expected = sorted('outer/inner/image/{}'.format(i) for i in xrange(3))
    self.assertEqual(tags, expected)
Example #26
  def test_image_summary_v2(self):
    """Tests image v2 invocation."""
    with test.mock.patch.object(
        summary_v2, 'image', autospec=True) as mock_image_v2:
      with summary_ops_v2.create_summary_file_writer('/tmp/test').as_default(
          step=2):
        i = array_ops.ones((5, 4, 4, 3))
        with ops.name_scope_v2('outer'):
          tensor = summary_lib.image(
              'image', i, max_outputs=3, family='family')
    # Returns empty string.
    self.assertEqual(tensor.numpy(), b'')
    self.assertEqual(tensor.dtype, dtypes.string)
    mock_image_v2.assert_called_once_with(
        'family/outer/family/image', data=i, step=2, max_outputs=3)
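For comparison, a hedged sketch of the TF 2.x eager-mode path that the mocked `summary_v2.image` call above forwards to; the writer directory is arbitrary.

# TF 2.x counterpart (sketch): the v2 op writes through the default
# writer rather than returning a serialized Summary proto.
import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/test')
with writer.as_default(step=2):
  images = tf.ones([5, 4, 4, 3])
  tf.summary.image('image', images, max_outputs=3)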
Example #27
def add_gan_model_image_summaries(gan_model, grid_size=10):
  """Adds image summaries for real and fake images.

  Args:
    gan_model: A GANModel tuple.
    grid_size: The size of an image grid.

  Raises:
    ValueError: If real and generated data aren't images.
  """
  _assert_is_image(gan_model.real_data)
  _assert_is_image(gan_model.generated_data)

  num_images = grid_size ** 2
  real_image_shape = gan_model.real_data.shape.as_list()[1:3]
  generated_image_shape = gan_model.generated_data.shape.as_list()[1:3]
  real_channels = gan_model.real_data.shape.as_list()[3]
  generated_channels = gan_model.generated_data.shape.as_list()[3]

  summary.image(
      'real_data',
      eval_utils.image_grid(
          gan_model.real_data[:num_images],
          grid_shape=(grid_size, grid_size),
          image_shape=real_image_shape,
          num_channels=real_channels),
      max_outputs=1)
  summary.image(
      'generated_data',
      eval_utils.image_grid(
          gan_model.generated_data[:num_images],
          grid_shape=(grid_size, grid_size),
          image_shape=generated_image_shape,
          num_channels=generated_channels),
      max_outputs=1)
  add_gan_model_summaries(gan_model)
Example #28
    def visualize(self):
        """Multi-channel visualization of densities as images.

    Creates and returns an image summary visualizing the current probability
    density estimates. The image contains one row for each channel. Within each
    row, the pixel intensities are proportional to probability values, and each
    row is centered on the median of the corresponding distribution.

    Returns:
      The created image summary.
    """
        with ops.name_scope(self._name_scope()):
            image = self._pmf
            image *= 255 / math_ops.reduce_max(image, axis=1, keepdims=True)
            image = math_ops.cast(image + .5, dtypes.uint8)
            image = image[None, :, :, None]
        return summary.image("pmf", image, max_outputs=1)
Example #29
  def visualize(self):
    """Multi-channel visualization of densities as images.

    Creates and returns an image summary visualizing the current probability
    density estimates. The image contains one row for each channel. Within each
    row, the pixel intensities are proportional to probability values, and each
    row is centered on the median of the corresponding distribution.

    Returns:
      The created image summary.
    """
    with ops.name_scope(self._name_scope()):
      image = self._pmf
      image *= 255 / math_ops.reduce_max(image, axis=1, keepdims=True)
      image = math_ops.cast(image + .5, dtypes.uint8)
      image = image[None, :, :, None]
    return summary.image("pmf", image, max_outputs=1)
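A standalone sketch of the normalization `visualize` performs: each channel row of the pmf is scaled so its maximum maps to 255, rounded by adding .5 before the uint8 cast. The pmf shape here is illustrative.

# Per-row scaling of a [channels, levels] pmf to a uint8 image (TF 1.x
# sketch).
import tensorflow as tf

pmf = tf.random_uniform([3, 64])
image = pmf * (255 / tf.reduce_max(pmf, axis=1, keepdims=True))
image = tf.cast(image + .5, tf.uint8)   # round to nearest
image = image[None, :, :, None]         # NHWC: [1, 3, 64, 1]
summary_op = tf.summary.image('pmf', image, max_outputs=1)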
Example #30
def add_image_summary(tensor, name=None, prefix=None, print_summary=False):
  """Adds an image summary for the given tensor.

  Args:
    tensor: a variable or op tensor with shape [batch,height,width,channels]
    name: the optional name for the summary.
    prefix: An optional prefix for the summary names.
    print_summary: If `True`, the summary is printed to stdout when the summary
      is computed.

  Returns:
    An image `Tensor` of type `string` whose contents are the serialized
    `Summary` protocol buffer.
  """
  summary_name = _get_summary_name(tensor, name, prefix)
  # If print_summary, then we need to make sure that this call doesn't add the
  # non-printing op to the collection. We'll add it to the collection later.
  collections = [] if print_summary else None
  op = summary.image(
      name=summary_name, tensor=tensor, collections=collections)
  if print_summary:
    op = logging_ops.Print(op, [tensor], summary_name)
    ops.add_to_collection(ops.GraphKeys.SUMMARIES, op)
  return op
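A brief usage sketch, on the assumption that this is `tf.contrib.slim.summaries.add_image_summary` from TF 1.x; with `print_summary=True` the returned op also prints the tensor when evaluated.

# Hedged usage sketch (assumes tf.contrib.slim.summaries on TF 1.x).
import tensorflow as tf

slim = tf.contrib.slim

images = tf.random_uniform([8, 28, 28, 1])
summary_op = slim.summaries.add_image_summary(
    images, name='digits', print_summary=True)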
Example #31
    def visualise(self):

        # https://gist.github.com/kukuruza/03731dc494603ceab0c5
        def put_kernels_on_grid(kernel, shape, pad=1):
            print(shape)
            shape = np.array([shape[1], shape[2], shape[0], shape[3]])
            kernel = tf.transpose(kernel, (1, 2, 0, 3))

            def factorization(n):
                for i in range(int(sqrt(float(n))), 0, -1):
                    if n % i == 0:
                        if i == 1:
                            print('Who would enter a prime number of filters')
                        return (i, int(n / i))

            (grid_Y, grid_X) = factorization(shape[3])
            print('grid: %d = (%d, %d)' % (shape[3], grid_Y, grid_X))

            # pad X and Y
            x = tf.pad(kernel,
                       tf.constant([[pad, pad], [pad, pad], [0, 0], [0, 0]]),
                       mode='CONSTANT')

            # X and Y dimensions, w.r.t. padding
            Y = shape[0] + 2 * pad
            X = shape[1] + 2 * pad

            channels = shape[2]

            # put NumKernels to the 1st dimension
            x = tf.transpose(x, (3, 0, 1, 2))
            # organize grid on Y axis
            x = tf.reshape(x, tf.stack([grid_X, Y * grid_Y, X, channels]))

            # switch X and Y axes
            x = tf.transpose(x, (0, 2, 1, 3))
            # organize grid on X axis
            x = tf.reshape(x, tf.stack([1, X * grid_X, Y * grid_Y, channels]))

            # back to normal order (not combining with the next step for clarity)
            x = tf.transpose(x, (2, 1, 3, 0))
            # to tf.image_summary order [batch_size, height, width, channels],
            #   where in this case batch_size == 1
            x = tf.transpose(x, (3, 0, 1, 2))

            # scaling to [0, 255] is not necessary for tensorboard
            return x

        patch_shape = np.array([64, 64])
        prev_layer = None

        for idx, layer in enumerate(self.model.layers):
            if 'input' in layer.name:
                input_i = layer.input
                chn = self.input_shape[-1]
                input_img = array_ops.squeeze(input_i)
                img = input_img[0, :, :]
                img = tf.expand_dims(img, 0)
                if chn > 1:
                    for i in range(chn):
                        slices = img[:, :, :, i]
                        slices = tf.expand_dims(slices, 3)
                        tf_summary.image(layer.name + '_' + str(i), slices)
                else:
                    img = tf.expand_dims(img, 3)
                    tf_summary.image(layer.name, img)
            elif 'maxpool' in layer.name or 'up2d' in layer.name:
                output = layer.output
                chn = output.get_shape().as_list()[-1]
                if 'maxpool' in layer.name:
                    patch_shape = patch_shape / 2
                    if 'maxpool2d_i' in layer.name:
                        patch_shape = patch_shape * 2
                else:
                    patch_shape = patch_shape * 2
                shape = np.array([1])
                shape = np.concatenate([shape, patch_shape,
                                        np.array([chn])]).astype(int)

                print(layer.name, shape)

                output_name = layer.name.replace(':', '_')
                output_img = array_ops.squeeze(output)

                # pick the output of one slice
                output_img = output_img[0, :, :, :]
                output_img = tf.expand_dims(output_img, 0)
                output_img = put_kernels_on_grid(output_img, shape)
                tf_summary.image(output_name, output_img)

            elif idx == len(self.model.layers) - 1:
                trgt = self.model.inputs[-1]
                output = layer.output
                output_name = layer.name.replace(':', '_')
                output_img = array_ops.squeeze(output)
                output_img = output_img[0, :, :, :]
                output_img = array_ops.transpose(output_img, perm=[2, 0, 1])
                output_img = tf.concat(
                    [output_img[0], output_img[1], output_img[2],
                     trgt[0, :, :, 0]], axis=1)
                img = tf.expand_dims(output_img, 0)
                img = tf.expand_dims(img, 3)
                tf_summary.image(output_name, img)
            elif 'conv' in layer.name:
                chn = layer.filters
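The grid layout above hinges on the nested `factorization` helper: it returns the largest divisor of `n` not exceeding `sqrt(n)`, together with its cofactor. Isolated below for clarity, with a few sanity checks.

# Standalone copy of the grid-factorization helper from put_kernels_on_grid.
from math import sqrt

def factorization(n):
  # Largest divisor of n that is <= sqrt(n), paired with its cofactor.
  for i in range(int(sqrt(float(n))), 0, -1):
    if n % i == 0:
      return (i, n // i)

assert factorization(64) == (8, 8)
assert factorization(48) == (6, 8)
assert factorization(7) == (1, 7)  # a prime filter count degrades to one row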
Example #32
  def set_model(self, model):
    self.model = model
    self.sess = K.get_session()
    if self.histogram_freq and self.merged is None:
      for layer in self.model.layers:
        for weight in layer.weights:
          mapped_weight_name = weight.name.replace(':', '_')
          tf_summary.histogram(mapped_weight_name, weight)
          if self.write_grads:
            grads = model.optimizer.get_gradients(model.total_loss, weight)
            tf_summary.histogram('{}_grad'.format(mapped_weight_name), grads)
          if self.write_images:
            w_img = array_ops.squeeze(weight)
            shape = K.int_shape(w_img)
            if len(shape) == 2:  # dense layer kernel case
              if shape[0] > shape[1]:
                w_img = array_ops.transpose(w_img)
                shape = K.int_shape(w_img)
              w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
            elif len(shape) == 3:  # convnet case
              if K.image_data_format() == 'channels_last':
                # switch to channels_first to display
                # every kernel as a separate image
                w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
                shape = K.int_shape(w_img)
              w_img = array_ops.reshape(w_img,
                                        [shape[0], shape[1], shape[2], 1])
            elif len(shape) == 1:  # bias case
              w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
            else:
              # not possible to handle 3D convnets etc.
              continue

            shape = K.int_shape(w_img)
            assert len(shape) == 4 and shape[-1] in [1, 3, 4]
            tf_summary.image(mapped_weight_name, w_img)

        if hasattr(layer, 'output'):
          tf_summary.histogram('{}_out'.format(layer.name), layer.output)
    self.merged = tf_summary.merge_all()

    if self.write_graph:
      self.writer = tf_summary.FileWriter(self.log_dir, self.sess.graph)
    else:
      self.writer = tf_summary.FileWriter(self.log_dir)

    if self.embeddings_freq:
      embeddings_layer_names = self.embeddings_layer_names

      if not embeddings_layer_names:
        embeddings_layer_names = [
            layer.name for layer in self.model.layers
            if type(layer).__name__ == 'Embedding'
        ]

      embeddings = {
          layer.name: layer.weights[0]
          for layer in self.model.layers if layer.name in embeddings_layer_names
      }

      self.saver = saver_lib.Saver(list(embeddings.values()))

      embeddings_metadata = {}

      if not isinstance(self.embeddings_metadata, str):
        embeddings_metadata = self.embeddings_metadata
      else:
        embeddings_metadata = {
            layer_name: self.embeddings_metadata
            for layer_name in embeddings.keys()
        }

      config = projector.ProjectorConfig()
      self.embeddings_ckpt_path = os.path.join(self.log_dir,
                                               'keras_embedding.ckpt')

      for layer_name, tensor in embeddings.items():
        embedding = config.embeddings.add()
        embedding.tensor_name = tensor.name

        if layer_name in embeddings_metadata:
          embedding.metadata_path = embeddings_metadata[layer_name]

      projector.visualize_embeddings(self.writer, config)
Example #33
  def test_image(self):
    op = summary_lib.image('mona_lisa',
                           random_normal(shape=[1, 400, 200, 3]))
    value = self._value_from_op(op)
    assert value.HasField('image'), value
    self._assert_noop(value)
Example #34
    def set_model(self, model):
        self.model = model
        self.sess = K.get_session()
        if self.histogram_freq and self.merged is None:
            for layer in self.model.layers:

                for weight in layer.weights:
                    tf_summary.histogram(weight.name, weight)
                    if self.write_images:
                        w_img = array_ops.squeeze(weight)
                        shape = w_img.get_shape()
                        if len(shape) > 1 and shape[0] > shape[1]:
                            w_img = array_ops.transpose(w_img)
                        if len(shape) == 1:
                            w_img = array_ops.expand_dims(w_img, 0)
                        w_img = array_ops.expand_dims(
                            array_ops.expand_dims(w_img, 0), -1)
                        tf_summary.image(weight.name, w_img)

                if hasattr(layer, 'output'):
                    tf_summary.histogram('{}_out'.format(layer.name),
                                         layer.output)
        self.merged = tf_summary.merge_all()

        if self.write_graph:
            self.writer = tf_summary.FileWriter(self.log_dir, self.sess.graph)
        else:
            self.writer = tf_summary.FileWriter(self.log_dir)

        if self.embeddings_freq:
            self.saver = saver_lib.Saver()

            embeddings_layer_names = self.embeddings_layer_names

            if not embeddings_layer_names:
                embeddings_layer_names = [
                    layer.name for layer in self.model.layers
                    if type(layer).__name__ == 'Embedding'
                ]

            embeddings = {
                layer.name: layer.weights[0]
                for layer in self.model.layers
                if layer.name in embeddings_layer_names
            }

            embeddings_metadata = {}

            if not isinstance(self.embeddings_metadata, str):
                embeddings_metadata = self.embeddings_metadata
            else:
                embeddings_metadata = {
                    layer_name: self.embeddings_metadata
                    for layer_name in embeddings.keys()
                }

            config = projector.ProjectorConfig()
            self.embeddings_logs = []

            for layer_name, tensor in embeddings.items():
                embedding = config.embeddings.add()
                embedding.tensor_name = tensor.name

                self.embeddings_logs.append(
                    os.path.join(self.log_dir, layer_name + '.ckpt'))

                if layer_name in embeddings_metadata:
                    embedding.metadata_path = embeddings_metadata[layer_name]

            projector.visualize_embeddings(self.writer, config)
Example #35
  def set_model(self, model):
    self.model = model
    self.sess = K.get_session()
    if self.histogram_freq and self.merged is None:
      for layer in self.model.layers:

        for weight in layer.weights:
          tf_summary.histogram(weight.name, weight)
          if self.write_images:
            w_img = array_ops.squeeze(weight)
            shape = w_img.get_shape()
            if len(shape) > 1 and shape[0] > shape[1]:
              w_img = array_ops.transpose(w_img)
            if len(shape) == 1:
              w_img = array_ops.expand_dims(w_img, 0)
            w_img = array_ops.expand_dims(array_ops.expand_dims(w_img, 0), -1)
            tf_summary.image(weight.name, w_img)

        if hasattr(layer, 'output'):
          tf_summary.histogram('{}_out'.format(layer.name), layer.output)
    self.merged = tf_summary.merge_all()

    if self.write_graph:
      self.writer = tf_summary.FileWriter(self.log_dir, self.sess.graph)
    else:
      self.writer = tf_summary.FileWriter(self.log_dir)

    if self.embeddings_freq:
      self.saver = saver_lib.Saver()

      embeddings_layer_names = self.embeddings_layer_names

      if not embeddings_layer_names:
        embeddings_layer_names = [
            layer.name for layer in self.model.layers
            if type(layer).__name__ == 'Embedding'
        ]

      embeddings = {
          layer.name: layer.weights[0]
          for layer in self.model.layers if layer.name in embeddings_layer_names
      }

      embeddings_metadata = {}

      if not isinstance(self.embeddings_metadata, str):
        embeddings_metadata = self.embeddings_metadata
      else:
        embeddings_metadata = {
            layer_name: self.embeddings_metadata
            for layer_name in embeddings.keys()
        }

      config = projector.ProjectorConfig()
      self.embeddings_logs = []

      for layer_name, tensor in embeddings.items():
        embedding = config.embeddings.add()
        embedding.tensor_name = tensor.name

        self.embeddings_logs.append(
            os.path.join(self.log_dir, layer_name + '.ckpt'))

        if layer_name in embeddings_metadata:
          embedding.metadata_path = embeddings_metadata[layer_name]

      projector.visualize_embeddings(self.writer, config)
Example #36
def add_stargan_image_summaries(stargan_model,
                                num_images=2,
                                display_diffs=False):
    """Adds image summaries to see StarGAN image results.

  If display_diffs is True, each image result has `2` rows and `num_domains + 1`
  columns.
  The first row looks like:
    [original_image, transformed_to_domain_0, transformed_to_domain_1, ...]
  The second row looks like:
    [no_modification_baseline, transformed_to_domain_0-original_image, ...]
  If display_diffs is False, only the first row is shown.

  IMPORTANT:
    Since the model does not originally transform the image to every domain,
    we transform them on the fly within this function, in parallel.

  Args:
    stargan_model: A StarGANModel tuple.
    num_images: The number of examples/images to be transformed and shown.
    display_diffs: Also display the difference between generated and target.

  Raises:
    ValueError: If input_data is not images.
    ValueError: If input_data_domain_label is not rank 2.
    ValueError: If dimension 2 of input_data_domain_label is not fully defined.
  """

    _assert_is_image(stargan_model.input_data)
    stargan_model.input_data_domain_label.shape.assert_has_rank(2)
    stargan_model.input_data_domain_label.shape[1:].assert_is_fully_defined()

    num_domains = (
        stargan_model.input_data_domain_label.get_shape().as_list()[-1])

    def _build_image(image):
        """Helper function to create a result for each image on the fly."""

        # Expand the first dimension as batch_size = 1.
        images = array_ops.expand_dims(image, axis=0)

        # Tile the image num_domains times, so we can get all transformed together.
        images = array_ops.tile(images, [num_domains, 1, 1, 1])

        # Create the targets 0, 1, 2, ..., num_domains-1.
        targets = array_ops.one_hot(list(range(num_domains)), num_domains)

        with variable_scope.variable_scope(stargan_model.generator_scope,
                                           reuse=True):

            # Add the original image.
            output_images_list = [image]

            # Generate the image and add to the list.
            gen_images = stargan_model.generator_fn(images, targets)
            gen_images_list = array_ops.split(gen_images, num_domains)
            gen_images_list = [
                array_ops.squeeze(img, axis=0) for img in gen_images_list
            ]
            output_images_list.extend(gen_images_list)

            # Display diffs.
            if display_diffs:
                diff_images = gen_images - images
                diff_images_list = array_ops.split(diff_images, num_domains)
                diff_images_list = [
                    array_ops.squeeze(img, axis=0) for img in diff_images_list
                ]
                output_images_list.append(array_ops.zeros_like(image))
                output_images_list.extend(diff_images_list)

            # Create the final image.
            final_image = eval_utils.image_reshaper(output_images_list,
                                                    num_cols=num_domains + 1)

        # Remove the leading batch dimension.
        return array_ops.squeeze(final_image, axis=0)

    summary.image('stargan_image_generation',
                  map_fn.map_fn(_build_image,
                                stargan_model.input_data[:num_images],
                                parallel_iterations=num_images,
                                back_prop=False,
                                swap_memory=True),
                  max_outputs=num_images)
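
The heart of _build_image above is a tile-plus-one_hot pairing: one input image is replicated num_domains times and matched with every possible one-hot target, so a single batched generator call yields all domain translations at once. A standalone sketch of just that pairing (shapes are illustrative):

import tensorflow as tf

num_domains = 3
image = tf.zeros([64, 64, 3])                      # one HWC image

images = tf.expand_dims(image, axis=0)             # [1, 64, 64, 3]
images = tf.tile(images, [num_domains, 1, 1, 1])   # [3, 64, 64, 3]

# One row per domain: [[1, 0, 0], [0, 1, 0], [0, 0, 1]].
targets = tf.one_hot(list(range(num_domains)), num_domains)

# generator_fn(images, targets) now emits one translation per domain
# in a single call; split + squeeze recovers the per-domain images.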
Example #37
0
  def set_model(self, model):
    self.model = model
    self.sess = K.get_session()
    if self.histogram_freq and self.merged is None:
      for layer in self.model.layers:
        for weight in layer.weights:
          mapped_weight_name = weight.name.replace(':', '_')
          tf_summary.histogram(mapped_weight_name, weight)
          if self.write_grads:
            grads = model.optimizer.get_gradients(model.total_loss, weight)
            tf_summary.histogram('{}_grad'.format(mapped_weight_name), grads)
          if self.write_images:
            w_img = array_ops.squeeze(weight)
            shape = K.int_shape(w_img)
            if len(shape) == 2:  # dense layer kernel case
              if shape[0] > shape[1]:
                w_img = array_ops.transpose(w_img)
                shape = K.int_shape(w_img)
              w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
            elif len(shape) == 3:  # convnet case
              if K.image_data_format() == 'channels_last':
                # switch to channels_first to display
                # every kernel as a separate image
                w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
                shape = K.int_shape(w_img)
              w_img = array_ops.reshape(w_img,
                                        [shape[0], shape[1], shape[2], 1])
            elif len(shape) == 1:  # bias case
              w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
            else:
              # not possible to handle 3D convnets etc.
              continue

            shape = K.int_shape(w_img)
            assert len(shape) == 4 and shape[-1] in [1, 3, 4]
            tf_summary.image(mapped_weight_name, w_img)

        if hasattr(layer, 'output'):
          tf_summary.histogram('{}_out'.format(layer.name), layer.output)
    self.merged = tf_summary.merge_all()

    if self.write_graph:
      self.writer = tf_summary.FileWriter(self.log_dir, self.sess.graph)
    else:
      self.writer = tf_summary.FileWriter(self.log_dir)

    if self.embeddings_freq:
      embeddings_layer_names = self.embeddings_layer_names

      if not embeddings_layer_names:
        embeddings_layer_names = [
            layer.name for layer in self.model.layers
            if type(layer).__name__ == 'Embedding'
        ]

      embeddings = {
          layer.name: layer.weights[0]
          for layer in self.model.layers if layer.name in embeddings_layer_names
      }

      self.saver = saver_lib.Saver(list(embeddings.values()))

      embeddings_metadata = {}

      if not isinstance(self.embeddings_metadata, str):
        embeddings_metadata = self.embeddings_metadata
      else:
        embeddings_metadata = {
            layer_name: self.embeddings_metadata
            for layer_name in embeddings.keys()
        }

      config = projector.ProjectorConfig()
      self.embeddings_ckpt_path = os.path.join(self.log_dir,
                                               'keras_embedding.ckpt')

      for layer_name, tensor in embeddings.items():
        embedding = config.embeddings.add()
        embedding.tensor_name = tensor.name

        if layer_name in embeddings_metadata:
          embedding.metadata_path = embeddings_metadata[layer_name]

      projector.visualize_embeddings(self.writer, config)
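
Note that visualize_embeddings only serializes the ProjectorConfig to a projector_config.pbtxt in the writer's log directory; the metadata_path it records must point at a plain TSV file that TensorBoard reads at view time. A sketch of writing a single-column metadata file (the labels and path are illustrative):

import os

log_dir = './logs'                   # must match the FileWriter's directory
labels = ['the', 'of', 'and', 'to']  # row i labels embedding row i

# Single-column metadata: one label per line, no header row.
with open(os.path.join(log_dir, 'metadata.tsv'), 'w') as f:
    for label in labels:
        f.write(label + '\n')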
Example #38
0
    def set_model(self, model):
        """Sets Keras model and creates summary ops."""

        self.model = model
        self.sess = K.get_session()
        # only make histogram summary op if it hasn't already been made
        if self.histogram_freq and self.merged is None:
            for layer in self.model.layers:
                for weight in layer.weights:
                    mapped_weight_name = weight.name.replace(':', '_')
                    tf_summary.histogram(mapped_weight_name, weight)
                    if self.write_images:
                        w_img = array_ops.squeeze(weight)
                        shape = K.int_shape(w_img)
                        if len(shape) == 2:  # dense layer kernel case
                            if shape[0] > shape[1]:
                                w_img = array_ops.transpose(w_img)
                                shape = K.int_shape(w_img)
                            w_img = array_ops.reshape(
                                w_img, [1, shape[0], shape[1], 1])
                        elif len(shape) == 3:  # convnet case
                            if K.image_data_format() == 'channels_last':
                                # switch to channels_first to display
                                # every kernel as a separate image
                                w_img = array_ops.transpose(w_img,
                                                            perm=[2, 0, 1])
                                shape = K.int_shape(w_img)
                            w_img = array_ops.reshape(
                                w_img, [shape[0], shape[1], shape[2], 1])
                        elif len(shape) == 1:  # bias case
                            w_img = array_ops.reshape(w_img,
                                                      [1, shape[0], 1, 1])
                        else:
                            # not possible to handle 3D convnets etc.
                            continue

                        shape = K.int_shape(w_img)
                        assert len(shape) == 4 and shape[-1] in [1, 3, 4]
                        tf_summary.image(mapped_weight_name, w_img)

                if self.write_grads:
                    for weight in layer.trainable_weights:
                        mapped_weight_name = weight.name.replace(':', '_')
                        grads = model.optimizer.get_gradients(
                            model.total_loss, weight)

                        def is_indexed_slices(grad):
                            return type(grad).__name__ == 'IndexedSlices'

                        grads = [
                            grad.values if is_indexed_slices(grad) else grad
                            for grad in grads
                        ]
                        tf_summary.histogram(
                            '{}_grad'.format(mapped_weight_name), grads)

                if hasattr(layer, 'output'):
                    if isinstance(layer.output, list):
                        for i, output in enumerate(layer.output):
                            tf_summary.histogram(
                                '{}_out_{}'.format(layer.name, i), output)
                    else:
                        tf_summary.histogram('{}_out'.format(layer.name),
                                             layer.output)
        self.merged = tf_summary.merge_all()

        if self.write_graph:
            self.writer = self._writer_class(self.log_dir, self.sess.graph)
        else:
            self.writer = self._writer_class(self.log_dir)

        # If both embeddings_freq and embeddings_data are available, we will
        # visualize embeddings.
        if self.embeddings_freq and self.embeddings_data is not None:
            self.embeddings_data = standardize_input_data(
                self.embeddings_data, model.input_names)

            # If embeddings_layer_names is not provided, get all of the embedding
            # layers from the model.
            embeddings_layer_names = self.embeddings_layer_names
            if not embeddings_layer_names:
                embeddings_layer_names = [
                    layer.name for layer in self.model.layers
                    if type(layer).__name__ == 'Embedding'
                ]

            self.assign_embeddings = []
            embeddings_vars = {}

            self.batch_id = batch_id = array_ops.placeholder(dtypes.int32)
            self.step = step = array_ops.placeholder(dtypes.int32)

            for layer in self.model.layers:
                if layer.name in embeddings_layer_names:
                    embedding_input = self.model.get_layer(layer.name).output
                    embedding_size = np.prod(embedding_input.shape[1:])
                    embedding_input = array_ops.reshape(
                        embedding_input, (step, int(embedding_size)))
                    shape = (self.embeddings_data[0].shape[0],
                             int(embedding_size))
                    embedding = variables.Variable(array_ops.zeros(shape),
                                                   name=layer.name +
                                                   '_embedding')
                    embeddings_vars[layer.name] = embedding
                    batch = state_ops.assign(
                        embedding[batch_id:batch_id + step], embedding_input)
                    self.assign_embeddings.append(batch)

            self.saver = saver.Saver(list(embeddings_vars.values()))

            # Create embeddings_metadata dictionary
            if isinstance(self.embeddings_metadata, str):
                embeddings_metadata = {
                    layer_name: self.embeddings_metadata
                    for layer_name in embeddings_vars.keys()
                }
            else:
                # If embeddings_metadata is already a dictionary
                embeddings_metadata = self.embeddings_metadata

            try:
                from tensorboard.plugins import projector
            except ImportError:
                raise ImportError(
                    'Failed to import TensorBoard. Please make sure that '
                    'TensorBoard integration is complete.')

            # TODO(psv): Add integration tests to test embedding visualization
            # with TensorBoard callback. We are unable to write a unit test for this
            # because TensorBoard dependency assumes TensorFlow package is installed.
            config = projector.ProjectorConfig()
            for layer_name, tensor in embeddings_vars.items():
                embedding = config.embeddings.add()
                embedding.tensor_name = tensor.name

                if (embeddings_metadata is not None
                        and layer_name in embeddings_metadata):
                    embedding.metadata_path = embeddings_metadata[layer_name]

            projector.visualize_embeddings(self.writer, config)
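
The batch_id and step placeholders built above only make sense together with the feeding loop the callback runs on epoch boundaries, which this snippet does not show. A simplified sketch of that consumer side, assuming embeddings_data has already been standardized to a list of arrays (the method name and batch size are illustrative):

import os

def _write_embeddings(self, epoch, batch_size=32):
    n_samples = self.embeddings_data[0].shape[0]
    i = 0
    while i < n_samples:
        step = min(batch_size, n_samples - i)
        feed_dict = {self.batch_id: i, self.step: step}
        for idx, model_input in enumerate(self.model.inputs):
            feed_dict[model_input] = self.embeddings_data[idx][i:i + step]
        # Runs the state_ops.assign ops collected in assign_embeddings,
        # copying this batch's activations into the embedding variables.
        self.sess.run(self.assign_embeddings, feed_dict=feed_dict)
        i += step
    # One checkpoint per epoch; the projector loads the latest one.
    self.saver.save(self.sess,
                    os.path.join(self.log_dir, 'keras_embedding.ckpt'),
                    epoch)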
Example #39
0
def add_stargan_image_summaries(stargan_model,
                                num_images=2,
                                display_diffs=False):
  """Adds image summaries to see StarGAN image results.

  If display_diffs is True, each image result has `2` rows and `num_domains + 1`
  columns.
  The first row looks like:
    [original_image, transformed_to_domain_0, transformed_to_domain_1, ...]
  The second row looks like:
    [no_modification_baseline, transformed_to_domain_0-original_image, ...]
  If display_diffs is False, only the first row is shown.

  IMPORTANT:
    Since the model does not originally transform the image to every domain,
    we transform the images on the fly within this function, in parallel.

  Args:
    stargan_model: A StarGANModel tuple.
    num_images: The number of examples/images to be transformed and shown.
    display_diffs: Also display the difference between generated and target.

  Raises:
    ValueError: If input_data is not images.
    ValueError: If input_data_domain_label is not rank 2.
    ValueError: If dimension 2 of input_data_domain_label is not fully defined.
  """

  _assert_is_image(stargan_model.input_data)
  stargan_model.input_data_domain_label.shape.assert_has_rank(2)
  stargan_model.input_data_domain_label.shape[1:].assert_is_fully_defined()

  num_domains = stargan_model.input_data_domain_label.get_shape().as_list()[-1]

  def _build_image(image):
    """Helper function to create a result for each image on the fly."""

    # Expand the first dimension as batch_size = 1.
    images = array_ops.expand_dims(image, axis=0)

    # Tile the image num_domains times so all transformations are generated together.
    images = array_ops.tile(images, [num_domains, 1, 1, 1])

    # Create the targets 0, 1, 2, ..., num_domains-1.
    targets = array_ops.one_hot(list(range(num_domains)), num_domains)

    with variable_scope.variable_scope(
        stargan_model.generator_scope, reuse=True):

      # Add the original image.
      output_images_list = [image]

      # Generate the image and add to the list.
      gen_images = stargan_model.generator_fn(images, targets)
      gen_images_list = array_ops.split(gen_images, num_domains)
      gen_images_list = [
          array_ops.squeeze(img, axis=0) for img in gen_images_list
      ]
      output_images_list.extend(gen_images_list)

      # Display diffs.
      if display_diffs:
        diff_images = gen_images - images
        diff_images_list = array_ops.split(diff_images, num_domains)
        diff_images_list = [
            array_ops.squeeze(img, axis=0) for img in diff_images_list
        ]
        output_images_list.append(array_ops.zeros_like(image))
        output_images_list.extend(diff_images_list)

      # Create the final image.
      final_image = eval_utils.image_reshaper(
          output_images_list, num_cols=num_domains + 1)

    # Remove the leading batch dimension.
    return array_ops.squeeze(final_image, axis=0)

  summary.image(
      'stargan_image_generation',
      functional_ops.map_fn(
          _build_image,
          stargan_model.input_data[:num_images],
          parallel_iterations=num_images,
          back_prop=False,
          swap_memory=True),
      max_outputs=num_images)
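
The summary above fans each of the first num_images inputs through _build_image independently via map_fn. A toy sketch of the same call pattern with a trivial element function, assuming a TF version where map_fn still accepts back_prop (the flip is a stand-in for _build_image):

import tensorflow as tf

batch = tf.random.uniform([2, 64, 64, 3])   # stand-in for input_data[:2]

def per_image(img):
    # Any [H, W, C] -> [H', W', C'] function works here.
    return tf.image.flip_left_right(img)

grid = tf.map_fn(
    per_image,
    batch,
    parallel_iterations=2,   # matches num_images above
    back_prop=False,         # summaries never need gradients
    swap_memory=True)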
Example #40
0
  def set_model(self, model):
    """Sets Keras model and creates summary ops."""

    self.model = model
    self.sess = K.get_session()
    # only make histogram summary op if it hasn't already been made
    if self.histogram_freq and self.merged is None:
      for layer in self.model.layers:
        for weight in layer.weights:
          mapped_weight_name = weight.name.replace(':', '_')
          tf_summary.histogram(mapped_weight_name, weight)
          if self.write_images:
            w_img = array_ops.squeeze(weight)
            shape = K.int_shape(w_img)
            if len(shape) == 2:  # dense layer kernel case
              if shape[0] > shape[1]:
                w_img = array_ops.transpose(w_img)
                shape = K.int_shape(w_img)
              w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
            elif len(shape) == 3:  # convnet case
              if K.image_data_format() == 'channels_last':
                # switch to channels_first to display
                # every kernel as a separate image
                w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
                shape = K.int_shape(w_img)
              w_img = array_ops.reshape(w_img,
                                        [shape[0], shape[1], shape[2], 1])
            elif len(shape) == 1:  # bias case
              w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
            else:
              # not possible to handle 3D convnets etc.
              continue

            shape = K.int_shape(w_img)
            assert len(shape) == 4 and shape[-1] in [1, 3, 4]
            tf_summary.image(mapped_weight_name, w_img)

        if self.write_grads:
          for weight in layer.trainable_weights:
            mapped_weight_name = weight.name.replace(':', '_')
            grads = model.optimizer.get_gradients(model.total_loss, weight)

            def is_indexed_slices(grad):
              return type(grad).__name__ == 'IndexedSlices'

            grads = [grad.values if is_indexed_slices(grad) else grad
                     for grad in grads]
            tf_summary.histogram('{}_grad'.format(mapped_weight_name), grads)

        if hasattr(layer, 'output'):
          if isinstance(layer.output, list):
            for i, output in enumerate(layer.output):
              tf_summary.histogram('{}_out_{}'.format(layer.name, i), output)
          else:
            tf_summary.histogram('{}_out'.format(layer.name), layer.output)
    self.merged = tf_summary.merge_all()

    if self.write_graph:
      self.writer = self._writer_class(self.log_dir, self.sess.graph)
    else:
      self.writer = self._writer_class(self.log_dir)

    # If both embeddings_freq and embeddings_data are available, we will
    # visualize embeddings.
    if self.embeddings_freq and self.embeddings_data is not None:
      self.embeddings_data = standardize_input_data(self.embeddings_data,
                                                    model.input_names)

      # If embeddings_layer_names is not provided, get all of the embedding
      # layers from the model.
      embeddings_layer_names = self.embeddings_layer_names
      if not embeddings_layer_names:
        embeddings_layer_names = [
            layer.name
            for layer in self.model.layers
            if type(layer).__name__ == 'Embedding'
        ]

      self.assign_embeddings = []
      embeddings_vars = {}

      self.batch_id = batch_id = array_ops.placeholder(dtypes.int32)
      self.step = step = array_ops.placeholder(dtypes.int32)

      for layer in self.model.layers:
        if layer.name in embeddings_layer_names:
          embedding_input = self.model.get_layer(layer.name).output
          embedding_size = np.prod(embedding_input.shape[1:])
          embedding_input = array_ops.reshape(embedding_input,
                                              (step, int(embedding_size)))
          shape = (self.embeddings_data[0].shape[0], int(embedding_size))
          embedding = variables.Variable(
              array_ops.zeros(shape), name=layer.name + '_embedding')
          embeddings_vars[layer.name] = embedding
          batch = state_ops.assign(embedding[batch_id:batch_id + step],
                                   embedding_input)
          self.assign_embeddings.append(batch)

      self.saver = saver.Saver(list(embeddings_vars.values()))

      # Create embeddings_metadata dictionary
      if isinstance(self.embeddings_metadata, str):
        embeddings_metadata = {
            layer_name: self.embeddings_metadata
            for layer_name in embeddings_vars.keys()
        }
      else:
        # If embeddings_metadata is already a dictionary
        embeddings_metadata = self.embeddings_metadata

      try:
        from tensorboard.plugins import projector
      except ImportError:
        raise ImportError('Failed to import TensorBoard. Please make sure that '
                          'TensorBoard integration is complete.')

      # TODO(psv): Add integration tests to test embedding visualization
      # with TensorBoard callback. We are unable to write a unit test for this
      # because TensorBoard dependency assumes TensorFlow package is installed.
      config = projector.ProjectorConfig()
      for layer_name, tensor in embeddings_vars.items():
        embedding = config.embeddings.add()
        embedding.tensor_name = tensor.name

        if (embeddings_metadata is not None and
            layer_name in embeddings_metadata):
          embedding.metadata_path = embeddings_metadata[layer_name]

      projector.visualize_embeddings(self.writer, config)
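
The IndexedSlices special case in the write_grads branch exists because gradients flowing into embedding lookups come back sparse, and histogram summaries need dense tensors, so only the .values component is logged. A minimal reproduction, assuming TF 1.x graph mode:

import tensorflow as tf

params = tf.Variable(tf.ones([10, 4]))
ids = tf.constant([1, 3, 3])
loss = tf.reduce_sum(tf.gather(params, ids))

(grad,) = tf.gradients(loss, [params])
print(type(grad).__name__)   # 'IndexedSlices', not a dense Tensor

# The callback's densifying step: log only the .values component.
dense_part = grad.values if type(grad).__name__ == 'IndexedSlices' else grad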