Example #1
import numpy as np
import tensorflow as tf

# form_image_grid, BATCH_SIZE, GRID_ROWS and GRID_COLS are assumed to be
# defined elsewhere in the original file.
def make_image(name, var, image_dims):
    prod = np.prod(image_dims)
    grid = form_image_grid(tf.reshape(var, [BATCH_SIZE, prod]),
                           [GRID_ROWS, GRID_COLS], image_dims, 1)
    s_grid = tf.squeeze(grid, axis=0)

    # This reproduces the code in: tensorflow/core/kernels/summary_image_op.cc
    im_min = tf.reduce_min(s_grid)
    im_max = tf.reduce_max(s_grid)

    kZeroThreshold = tf.constant(1e-6)
    max_val = tf.maximum(tf.abs(im_min), tf.abs(im_max))
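    # Mirror the C++ kernel's uint8 mapping: data with negatives is centered
    # on 128 and scaled by 127/max|x|; non-negative data is scaled by
    # 255/max, with a zero scale guarding all-(near-)zero grids.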

    offset = tf.cond(im_min < tf.constant(0.0), lambda: tf.constant(128.0),
                     lambda: tf.constant(0.0))
    scale = tf.cond(
        im_min < tf.constant(0.0),
        lambda: tf.cond(max_val < kZeroThreshold, lambda: tf.constant(0.0),
                        lambda: tf.div(127.0, max_val)),
        lambda: tf.cond(im_max < kZeroThreshold, lambda: tf.constant(0.0),
                        lambda: tf.div(255.0, im_max)))
    s_grid = tf.cast(tf.add(tf.multiply(s_grid, scale), offset), tf.uint8)
    enc = tf.image.encode_jpeg(s_grid)

    fwrite = tf.write_file(name, enc)
    return fwrite
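
A minimal sketch of calling make_image; the constant values are hypothetical
and form_image_grid is assumed to be in scope, as in the original file:

BATCH_SIZE, GRID_ROWS, GRID_COLS = 64, 8, 8  # hypothetical values

var = tf.random_uniform([BATCH_SIZE, 28 * 28])  # e.g. a flattened MNIST batch
write_op = make_image('grid.jpg', var, [28, 28, 1])
with tf.Session() as sess:
    sess.run(write_op)  # JPEG-encodes the grid and writes it to disk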
Example #2
import ast
import os

import tensorflow as tf

# image_utils, learning, model, slim and FLAGS are project objects assumed
# to be imported/defined elsewhere in the original file.
def main(_):
    with tf.Graph().as_default():
        # Create inputs in [0, 1], as expected by vgg_16.
        inputs, _ = image_utils.imagenet_inputs(FLAGS.batch_size,
                                                FLAGS.image_size)
        evaluation_images = image_utils.load_evaluation_images(
            FLAGS.image_size)

        # Process style and weight flags
        if FLAGS.style_coefficients is None:
            style_coefficients = [1.0 for _ in range(FLAGS.num_styles)]
        else:
            style_coefficients = ast.literal_eval(FLAGS.style_coefficients)
        if len(style_coefficients) != FLAGS.num_styles:
            raise ValueError(
                'number of style coefficients differs from number of styles')
        content_weights = ast.literal_eval(FLAGS.content_weights)
        style_weights = ast.literal_eval(FLAGS.style_weights)

        # Load style images.
        style_images, labels, style_gram_matrices = image_utils.style_image_inputs(
            os.path.expanduser(FLAGS.style_dataset_file),
            batch_size=FLAGS.num_styles,
            image_size=FLAGS.image_size,
            square_crop=True,
            shuffle=False)
        labels = tf.unstack(labels)

        def _create_normalizer_params(style_label):
            """Creates normalizer parameters from a style label."""
            return {
                'labels': tf.expand_dims(style_label, 0),
                'num_categories': FLAGS.num_styles,
                'center': True,
                'scale': True
            }

        # Dummy call to simplify the reuse logic
        model.transform(inputs,
                        reuse=False,
                        normalizer_params=_create_normalizer_params(labels[0]))

        def _style_sweep(inputs):
            """Transfers all styles onto the input one at a time."""
            inputs = tf.expand_dims(inputs, 0)
            stylized_inputs = [
                model.transform(
                    inputs,
                    reuse=True,
                    normalizer_params=_create_normalizer_params(style_label))
                for style_label in labels
            ]
            return tf.concat([inputs] + stylized_inputs, 0)

        if FLAGS.style_grid:
            style_row = tf.concat([
                tf.ones([1, FLAGS.image_size, FLAGS.image_size, 3]),
                style_images
            ], 0)
            stylized_training_example = _style_sweep(inputs[0])
            stylized_evaluation_images = [
                _style_sweep(image) for image in tf.unstack(evaluation_images)
            ]
            stylized_noise = _style_sweep(
                tf.random_uniform([FLAGS.image_size, FLAGS.image_size, 3]))
            stylized_style_images = [
                _style_sweep(image) for image in tf.unstack(style_images)
            ]
            if FLAGS.style_crossover:
                grid = tf.concat(
                    [style_row, stylized_training_example, stylized_noise] +
                    stylized_evaluation_images + stylized_style_images, 0)
            else:
                grid = tf.concat(
                    [style_row, stylized_training_example, stylized_noise] +
                    stylized_evaluation_images, 0)
            if FLAGS.style_crossover:
                grid_shape = [
                    3 + evaluation_images.get_shape().as_list()[0] +
                    FLAGS.num_styles, 1 + FLAGS.num_styles
                ]
            else:
                grid_shape = [
                    3 + evaluation_images.get_shape().as_list()[0],
                    1 + FLAGS.num_styles
                ]
            tf.summary.image(
                'Style Grid',
                tf.cast(
                    image_utils.form_image_grid(
                        grid, grid_shape,
                        [FLAGS.image_size, FLAGS.image_size], 3) * 255.0,
                    tf.uint8))

        if FLAGS.learning_curves:
            metrics = {}
            for i, label in enumerate(labels):
                gram_matrices = {
                    key: value[i:i + 1]
                    for key, value in style_gram_matrices.items()
                }
                stylized_inputs = model.transform(
                    inputs,
                    reuse=True,
                    normalizer_params=_create_normalizer_params(label))
                _, loss_dict = learning.total_loss(inputs,
                                                   stylized_inputs,
                                                   gram_matrices,
                                                   content_weights,
                                                   style_weights,
                                                   reuse=i > 0)
                for key, value in loss_dict.items():
                    metrics['{}_style_{}'.format(
                        key, i)] = slim.metrics.streaming_mean(value)

            names_values, names_updates = slim.metrics.aggregate_metric_map(
                metrics)
            for name, value in names_values.items():
                summary_op = tf.summary.scalar(name, value, [])
                print_op = tf.Print(summary_op, [value], name)
                tf.add_to_collection(tf.GraphKeys.SUMMARIES, print_op)
            eval_op = list(names_updates.values())
            num_evals = FLAGS.num_evals
        else:
            eval_op = None
            num_evals = 1

        slim.evaluation.evaluation_loop(
            master=FLAGS.master,
            checkpoint_dir=os.path.expanduser(FLAGS.train_dir),
            logdir=os.path.expanduser(FLAGS.eval_dir),
            eval_op=eval_op,
            num_evals=num_evals,
            eval_interval_secs=FLAGS.eval_interval_secs)
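
The learning-curves branch above hinges on slim's streaming metrics. A small
self-contained sketch of that plumbing (the loss tensor and metric name are
stand-ins, not the project's values):

import tensorflow as tf
slim = tf.contrib.slim

loss = tf.constant(0.5)  # stand-in for one entry of loss_dict
metrics = {'total_loss_style_0': slim.metrics.streaming_mean(loss)}
# aggregate_metric_map splits {name: (value_op, update_op)} into two
# name-keyed dicts: one of value ops, one of update ops.
names_values, names_updates = slim.metrics.aggregate_metric_map(metrics)
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # streaming metrics use locals
    sess.run(list(names_updates.values()))
    print(sess.run(names_values['total_loss_style_0']))  # prints 0.5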
Example #3
import ast
import os

import numpy as np
import tensorflow as tf
from PIL import Image

# image_utils, learning, model, slim, FLAGS, form_image_grid and the grid
# constants are assumed to be imported/defined elsewhere in the original files.
def layer_grid_summary(name, var, image_dims):
    prod = np.prod(image_dims)
    grid = form_image_grid(tf.reshape(var, [BATCH_SIZE, prod]),
                           [GRID_ROWS, GRID_COLS], image_dims, 1)
    return tf.summary.image(name, grid)
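
A sketch of wiring layer_grid_summary into a TensorBoard writer, reusing the
hypothetical var tensor and constants from the Example #1 sketch:

summary_op = layer_grid_summary('input_grid', var, [28, 28, 1])
with tf.Session() as sess:
    writer = tf.summary.FileWriter('/tmp/logs', sess.graph)
    writer.add_summary(sess.run(summary_op), global_step=0)
    writer.close()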
def main(_):
  with tf.Graph().as_default():
    # Create inputs in [0, 1], as expected by vgg_16.
    inputs, _ = image_utils.imagenet_inputs(
        FLAGS.batch_size, FLAGS.image_size)
    evaluation_images = image_utils.load_evaluation_images(FLAGS.image_size)

    # Process style and weight flags
    if FLAGS.style_coefficients is None:
      style_coefficients = [1.0 for _ in range(FLAGS.num_styles)]
    else:
      style_coefficients = ast.literal_eval(FLAGS.style_coefficients)
    if len(style_coefficients) != FLAGS.num_styles:
      raise ValueError(
          'number of style coefficients differs from number of styles')
    content_weights = ast.literal_eval(FLAGS.content_weights)
    style_weights = ast.literal_eval(FLAGS.style_weights)

    # Load style images.
    style_images, labels, style_gram_matrices = image_utils.style_image_inputs(
        os.path.expanduser(FLAGS.style_dataset_file),
        batch_size=FLAGS.num_styles, image_size=FLAGS.image_size,
        square_crop=True, shuffle=False)
    labels = tf.unstack(labels)

    def _create_normalizer_params(style_label):
      """Creates normalizer parameters from a style label."""
      return {'labels': tf.expand_dims(style_label, 0),
              'num_categories': FLAGS.num_styles,
              'center': True,
              'scale': True}

    # Dummy call to simplify the reuse logic
    model.transform(inputs, reuse=False,
                    normalizer_params=_create_normalizer_params(labels[0]))

    def _style_sweep(inputs):
      """Transfers all styles onto the input one at a time."""
      inputs = tf.expand_dims(inputs, 0)
      stylized_inputs = [
          model.transform(
              inputs,
              reuse=True,
              normalizer_params=_create_normalizer_params(style_label))
          for style_label in labels]
      return tf.concat([inputs] + stylized_inputs, 0)

    if FLAGS.style_grid:
      style_row = tf.concat(
          [tf.ones([1, FLAGS.image_size, FLAGS.image_size, 3]), style_images],
          0)
      stylized_training_example = _style_sweep(inputs[0])
      stylized_evaluation_images = [
          _style_sweep(image) for image in tf.unstack(evaluation_images)]
      stylized_noise = _style_sweep(
          tf.random_uniform([FLAGS.image_size, FLAGS.image_size, 3]))
      stylized_style_images = [
          _style_sweep(image) for image in tf.unstack(style_images)]
      if FLAGS.style_crossover:
        grid = tf.concat(
            [style_row, stylized_training_example, stylized_noise] +
            stylized_evaluation_images + stylized_style_images,
            0)
      else:
        grid = tf.concat(
            [style_row, stylized_training_example, stylized_noise] +
            stylized_evaluation_images,
            0)
      if FLAGS.style_crossover:
        grid_shape = [
            3 + evaluation_images.get_shape().as_list()[0] + FLAGS.num_styles,
            1 + FLAGS.num_styles]
      else:
        grid_shape = [
            3 + evaluation_images.get_shape().as_list()[0],
            1 + FLAGS.num_styles]

      tf.summary.image(
          'Style Grid',
          tf.cast(
              image_utils.form_image_grid(
                  grid,
                  grid_shape,
                  [FLAGS.image_size, FLAGS.image_size],
                  3) * 255.0,
              tf.uint8))

    if FLAGS.learning_curves:
      metrics = {}
      for i, label in enumerate(labels):
        gram_matrices = {
            key: value[i: i + 1]
            for key, value in style_gram_matrices.items()}
        stylized_inputs = model.transform(
            inputs,
            reuse=True,
            normalizer_params=_create_normalizer_params(label))
        _, loss_dict = learning.total_loss(
            inputs, stylized_inputs, gram_matrices, content_weights,
            style_weights, reuse=i > 0)
        for key, value in loss_dict.items():
          metrics['{}_style_{}'.format(key, i)] = slim.metrics.streaming_mean(
              value)

      names_values, names_updates = slim.metrics.aggregate_metric_map(metrics)
      for name, value in names_values.items():
        summary_op = tf.summary.scalar(name, value, [])
        print_op = tf.Print(summary_op, [value], name)
        tf.add_to_collection(tf.GraphKeys.SUMMARIES, print_op)
      eval_op = list(names_updates.values())
      num_evals = FLAGS.num_evals
    else:
      eval_op = None
      num_evals = 1

    slim.evaluation.evaluation_loop(
        master=FLAGS.master,
        checkpoint_dir=os.path.expanduser(FLAGS.train_dir),
        logdir=os.path.expanduser(FLAGS.eval_dir),
        eval_op=eval_op,
        num_evals=num_evals,
        eval_interval_secs=FLAGS.eval_interval_secs)
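
The weight and coefficient flags are plain Python literals parsed with
ast.literal_eval; the values below are hypothetical stand-ins (real keys
would name vgg_16 layers):

import ast

content_weights = ast.literal_eval('{"vgg_16/conv3": 1.0}')
style_weights = ast.literal_eval(
    '{"vgg_16/conv1": 5e-4, "vgg_16/conv2": 5e-4}')
style_coefficients = ast.literal_eval('[1.0, 1.0, 2.0]')
assert len(style_coefficients) == 3  # must match --num_styles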
def main(_):
    with tf.Graph().as_default():
        # Create inputs in [0, 1], as expected by vgg_16.
        inputs, _ = image_utils.imagenet_inputs(FLAGS.batch_size,
                                                FLAGS.image_size)
        evaluation_images = image_utils.load_evaluation_images(
            FLAGS.image_size)

        # Load style images.
        style_images, labels, style_gram_matrices = image_utils.style_image_inputs(
            os.path.expanduser(FLAGS.style_dataset_file),
            batch_size=FLAGS.num_styles,
            image_size=FLAGS.image_size,
            square_crop=True,
            shuffle=False)
        labels = tf.unstack(labels)

        def _create_normalizer_params(style_label):
            """Creates normalizer parameters from a style label."""
            return {
                'labels': tf.expand_dims(style_label, 0),
                'num_categories': FLAGS.num_styles,
                'center': True,
                'scale': True
            }

        # Dummy call to simplify the reuse logic
        model.transform(inputs,
                        alpha=FLAGS.alpha,
                        reuse=False,
                        normalizer_params=_create_normalizer_params(labels[0]))

        def _style_sweep(inputs):
            """Transfers all styles onto the input one at a time."""
            inputs = tf.expand_dims(inputs, 0)
            stylized_inputs = []
            for style_label in labels:
                stylized_input = model.transform(
                    inputs,
                    alpha=FLAGS.alpha,
                    reuse=True,
                    normalizer_params=_create_normalizer_params(style_label))
                stylized_inputs.append(stylized_input)
            return tf.concat([inputs] + stylized_inputs, 0)

        style_row = tf.concat([
            tf.ones([1, FLAGS.image_size, FLAGS.image_size, 3]), style_images
        ], 0)
        stylized_training_example = _style_sweep(inputs[0])
        stylized_evaluation_images = [
            _style_sweep(image) for image in tf.unstack(evaluation_images)
        ]
        stylized_noise = _style_sweep(
            tf.random_uniform([FLAGS.image_size, FLAGS.image_size, 3]))
        stylized_style_images = [
            _style_sweep(image) for image in tf.unstack(style_images)
        ]
        if FLAGS.style_crossover:
            grid = tf.concat(
                [style_row, stylized_training_example, stylized_noise] +
                stylized_evaluation_images + stylized_style_images, 0)
        else:
            grid = tf.concat(
                [style_row, stylized_training_example, stylized_noise] +
                stylized_evaluation_images, 0)
        if FLAGS.style_crossover:
            grid_shape = [
                3 + evaluation_images.get_shape().as_list()[0] +
                FLAGS.num_styles, 1 + FLAGS.num_styles
            ]
        else:
            grid_shape = [
                3 + evaluation_images.get_shape().as_list()[0],
                1 + FLAGS.num_styles
            ]

        style_grid = tf.cast(
            image_utils.form_image_grid(
                grid, grid_shape, [FLAGS.image_size, FLAGS.image_size], 3) *
            255.0, tf.uint8)

        with tf.Session() as sess:
            # Variables must be initialized and the input queue runners
            # started before the grid tensor can be evaluated.
            sess.run(tf.global_variables_initializer())
            tf.train.start_queue_runners(sess=sess)
            np_array = sess.run(tf.squeeze(style_grid))
            im = Image.fromarray(np_array)
            im.save('matrix.png')
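
Unlike the evaluation_loop variants above, this last example materializes the
style grid once: it evaluates the uint8 grid tensor in a session and saves it
with PIL instead of emitting TensorBoard summaries. In a real run the model
variables would presumably be restored from a training checkpoint (e.g. with
tf.train.Saver) rather than freshly initialized; that step is not shown here.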