# Example #1
# 0
def multiple_input_images(checkpoint, num_styles, input_images_dir,
                          input_images, which_styles):
    """Added by Raul Gombru. Computes style transfer for a list of images.

    Args:
        checkpoint: checkpoint path handed to `_load_checkpoint`.
        num_styles: number of style categories the checkpoint was trained on.
        input_images_dir: directory containing the input images.
        input_images: list of image file names inside `input_images_dir`.
        which_styles: list of integer style labels to apply to each image.

    Returns:
        Dict mapping '<image basename>_<style label>' to a stylized image
        array of shape [1, H, W, C].
    """

    def _build_stylize_op(image_name):
        # Builds (or reuses, via AUTO_REUSE) the transform graph for one
        # image, replicated once per requested style.
        # os.path.join tolerates input_images_dir with or without a
        # trailing separator (plain `+` concatenation did not).
        image_path = os.path.join(input_images_dir, image_name)
        image = np.expand_dims(
            image_utils.load_np_image(os.path.expanduser(image_path)), 0)
        return model.transform(
            tf.concat([image for _ in range(len(which_styles))], 0),
            normalizer_params={
                'labels': tf.constant(which_styles),
                'num_categories': num_styles,
                'center': True,
                'scale': True
            },
            reuse=tf.AUTO_REUSE)

    result_images = {}

    with tf.Graph().as_default(), tf.Session() as sess:
        # A first transform op must exist so the model variables are
        # created before the checkpoint can be restored.
        _build_stylize_op(input_images[0])

        # Load the model
        _load_checkpoint(sess, checkpoint)

        # Stylize images
        for image_name in input_images:
            stylized = _build_stylize_op(image_name).eval()
            for which, stylized_image in zip(which_styles, stylized):
                key = image_name.split('.')[0] + '_' + str(which)
                result_images[key] = stylized_image[None, ...]

    return result_images
def _export_to_saved_model(checkpoint, alpha, num_styles):
  """Exports the weighted-style-transfer network as a SavedModel.

  Args:
    checkpoint: path to the checkpoint to restore into the graph.
    alpha: width multiplier forwarded to model.transform.
    num_styles: number of styles the checkpoint was trained with; also the
      length of the 'style_weights' input placeholder.

  Returns:
    Path of the temporary directory containing the written SavedModel.
  """
  saved_model_dir = tempfile.mkdtemp()

  with tf.Graph().as_default(), tf.Session() as sess:
    # Define input tensor as placeholder to allow export
    input_image_tensor = tf.placeholder(tf.float32, shape=(1, None, None, 3),
                                        name='input_image')
    weights_tensor = tf.placeholder(tf.float32, shape=num_styles,
                                    name='style_weights')

    # Load the graph definition from Magenta
    stylized_image_tensor = model.transform(
      input_image_tensor,
      alpha=alpha,
      normalizer_fn=ops.weighted_instance_norm,
      normalizer_params={
        'weights': weights_tensor,
        # Bug fix: honour the function's num_styles argument instead of
        # silently reading FLAGS.num_styles (which made the parameter dead).
        'num_categories': num_styles,
        'center': True,
        'scale': True}
    )

    # Load model weights from downloaded checkpoint file
    _load_checkpoint(sess, checkpoint)

    # Write SavedModel for serving or conversion to TF Lite
    tf.saved_model.simple_save(
      sess, saved_model_dir,
      inputs={input_image_tensor.name: input_image_tensor,
              weights_tensor.name: weights_tensor},
      outputs={'stylized_image': stylized_image_tensor}
    )

  return saved_model_dir
# Example #3
# 0
    def generate_image(self, image, output_number=6):
        """Stylizes `image` with `output_number` randomly chosen styles.

        Args:
            image: input image batch fed to model.transform
                (presumably shape [1, H, W, 3] — TODO confirm with caller).
            output_number: how many distinct random styles to render.

        Returns:
            Evaluated numpy batch of stylized images, one per chosen style.
        """

        # The official demo said self.num_styles should not be changed. But I don't know the reason.

        # Bug fix: `range` objects are immutable in Python 3, so
        # random.shuffle(range(...)) raises TypeError; materialize a list.
        styles = list(range(self.num_styles))
        random.shuffle(styles)
        which_styles = styles[0:output_number]

        with tf.Graph().as_default(), tf.Session() as sess:
            stylized_images = model.transform(
                tf.concat([image for _ in range(len(which_styles))], 0),
                normalizer_params={
                    'labels': tf.constant(which_styles),
                    'num_categories': self.num_styles,
                    'center': True,
                    'scale': True
                })

            # Restore the trained weights before evaluating the graph.
            model_saver = tf.train.Saver(tf.global_variables())
            model_saver.restore(sess, self.checkpoints)
            return stylized_images.eval()
def main(unused_argv=None):
    """Stylizes FLAGS.input_image with every style in FLAGS.which_styles
    and writes one '<basename>_<style>.png' per style to FLAGS.output_dir."""
    # Load image
    image = np.expand_dims(image_utils.load_np_image(FLAGS.input_image), 0)

    which_styles = ast.literal_eval(FLAGS.which_styles)

    with tf.Graph().as_default(), tf.Session() as sess:
        # Fix: tf.concat(axis, values) is the pre-1.0 argument order; the
        # rest of this file targets TF >= 1.x (tf.AUTO_REUSE is used), where
        # tf.concat takes the value list first.
        stylized_images = model.transform(
            tf.concat([image for _ in range(len(which_styles))], 0),
            normalizer_params={
                'labels': tf.constant(which_styles),
                'num_categories': FLAGS.num_styles,
                'center': True,
                'scale': True
            })
        # tf.all_variables() was removed; tf.global_variables() replaces it.
        model_saver = tf.train.Saver(tf.global_variables())
        checkpoint = FLAGS.checkpoint
        if tf.gfile.IsDirectory(checkpoint):
            checkpoint = tf.train.latest_checkpoint(checkpoint)
            tf.logging.info(
                'loading latest checkpoint file: {}'.format(checkpoint))
        model_saver.restore(sess, checkpoint)

        stylized_images = stylized_images.eval()
        for which, stylized_image in zip(which_styles, stylized_images):
            image_utils.save_np_image(
                stylized_image[None, ...],
                '{}/{}_{}.png'.format(FLAGS.output_dir, FLAGS.output_basename,
                                      which))
 def _style_sweep(inputs):
   """Transfers all styles onto the input one at a time."""
   inputs = tf.expand_dims(inputs, 0)
   # The enumerate index was never used; iterate the labels directly.
   stylized_inputs = [
       model.transform(
           inputs,
           reuse=True,
           normalizer_params=_create_normalizer_params(style_label))
       for style_label in labels]
   return tf.concat([inputs] + stylized_inputs, 0)
def _multiple_images(input_image, which_styles, output_dir):
  """Stylizes an image into a set of styles and writes them to disk."""
  num_copies = len(which_styles)
  with tf.Graph().as_default(), tf.Session() as sess:
    # One replica of the input per requested style; the label vector
    # selects the conditional-instance-norm parameters per replica.
    batch = tf.concat([input_image for _ in range(num_copies)], 0)
    norm_params = {
        'labels': tf.constant(which_styles),
        'num_categories': FLAGS.num_styles,
        'center': True,
        'scale': True}
    stylize_op = model.transform(batch, normalizer_params=norm_params)
    _load_checkpoint(sess, FLAGS.checkpoint)

    # Evaluate once, then write one PNG per style label.
    for style_id, styled in zip(which_styles, stylize_op.eval()):
      image_utils.save_np_image(
          styled[None, ...],
          '{}/{}_{}.png'.format(output_dir, FLAGS.output_basename, style_id))
# Example #7
# 0
def _multiple_images(input_image, which_styles, output_dir):
  """Stylizes an image into a set of styles and writes them to disk.

  Args:
    input_image: image batch fed to model.transform (presumably
      [1, H, W, 3] — TODO confirm with caller).
    which_styles: list of integer style labels to render.
    output_dir: directory receiving one PNG per style.
  """
  with tf.Graph().as_default(), tf.Session() as sess:
    stylized_images = model.transform(
        # Fix: tf.concat_v2 was removed in TF 1.0; tf.concat is the
        # equivalent call (and what the sibling copy in this file uses).
        tf.concat([input_image for _ in range(len(which_styles))], 0),
        normalizer_params={
            'labels': tf.constant(which_styles),
            'num_categories': FLAGS.num_styles,
            'center': True,
            'scale': True})
    _load_checkpoint(sess, FLAGS.checkpoint)

    stylized_images = stylized_images.eval()
    for which, stylized_image in zip(which_styles, stylized_images):
      image_utils.save_np_image(
          stylized_image[None, ...],
          '{}/{}_{}.png'.format(output_dir, FLAGS.output_basename, which))
def generate_images():
    """Stylizes the shared `images['latest']` frame with every configured
    style and stores resized copies back into `images`.

    Reads module-level globals: `images`, `num_rendered`, `which_styles`,
    `num_styles`, `checkpoint` — assumed from usage; TODO confirm.
    """
    image = images['latest']
    image = image.astype('float32')
    print(type(image[0, 0, 0, 0]))
    with tf.Graph().as_default(), tf.Session() as sess:
        print('Started transforming')
        # One replica of the frame per rendered style; `labels` selects the
        # conditional-instance-norm style for each replica.
        stylized_images = model.transform(
            tf.concat([image for _ in range(num_rendered)], 0),
            normalizer_params={
                'labels': tf.constant(which_styles),
                'num_categories': num_styles,
                'center': True,
                'scale': True
            })
        print('Finished transforming')
        model_saver = tf.train.Saver(tf.global_variables())
        print('Saver created')
        model_saver.restore(sess, checkpoint)
        # Scrub NaNs from the restored weights in place — presumably the
        # checkpoint contains NaN entries that would corrupt the output;
        # verify against the checkpoint actually shipped.
        for var in tqdm(tf.global_variables()):
            w = var.eval()
            w = np.nan_to_num(w)
            var.assign(w).eval()
        print('Model restored')
        stylized_images = stylized_images.eval()
        print('Images evaluated')

    for count, s_im in enumerate(stylized_images):
        # Got this warning:
        # Lossy conversion from float32 to uint8. Range [0, 1]. Convert image to uint8 prior to saving to suppress this warning.
        # So we resize the image
        # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3; on
        # modern SciPy this must be replaced with PIL's Image.resize, minding
        # the min/max bytescaling imresize applied to float inputs.
        converted_image = scipy.misc.imresize(s_im, (300, 533),
                                              interp='nearest',
                                              mode=None)
        display_image = scipy.misc.imresize(s_im, (133, 200),
                                            interp='nearest',
                                            mode=None)
        print('display iamge shape', display_image.shape)
        img = Image.fromarray(display_image)
        display_image = ImageTk.PhotoImage(image=img)
        print(converted_image.shape)
        # Slots 0..9 hold full-size frames; slots 10..19 hold the Tk
        # display copies for the UI.
        images[count] = converted_image
        images[count + 10] = display_image
        name = f'./stylized_images/image-test7-{count}.jpg'
        imageio.imwrite(name, converted_image)
# Example #9
# 0
def _multiple_styles(input_image, which_styles, output_dir):
  """Stylizes image into a linear combination of styles and writes to disk."""
  with tf.Graph().as_default(), tf.Session() as sess:
    # Blend the requested styles into a single per-style weight vector.
    mixture = _style_mixture(which_styles, FLAGS.num_styles)
    norm_params = {
        'weights': tf.constant(mixture),
        'num_categories': FLAGS.num_styles,
        'center': True,
        'scale': True}
    output_tensor = model.transform(
        input_image,
        normalizer_fn=ops.weighted_instance_norm,
        normalizer_params=norm_params)
    _load_checkpoint(sess, FLAGS.checkpoint)

    # Evaluate the graph and persist the blended result.
    result = output_tensor.eval()
    out_path = os.path.join(output_dir, '%s_%s.png' % (
        FLAGS.output_basename, _describe_style(which_styles)))
    image_utils.save_np_image(result, out_path)
def _multiple_styles(input_image, which_styles, output_dir):
  """Stylizes image into a linear combination of styles and writes to disk."""
  with tf.Graph().as_default(), tf.Session() as sess:
    style_mix = _style_mixture(which_styles, FLAGS.num_styles)
    # Weighted instance norm blends the selected styles in one pass.
    blended = model.transform(
        input_image,
        normalizer_fn=ops.weighted_instance_norm,
        normalizer_params={
            'weights': tf.constant(style_mix),
            'num_categories': FLAGS.num_styles,
            'center': True,
            'scale': True})
    _load_checkpoint(sess, FLAGS.checkpoint)

    filename = '%s_%s.png' % (FLAGS.output_basename,
                              _describe_style(which_styles))
    image_utils.save_np_image(blended.eval(),
                              os.path.join(output_dir, filename))
# Example #11
# 0
def main(unused_argv=None):
    """Trains the N-styles transfer network (variant with alpha multiplier).

    Input pipelines are pinned to CPU; the transformer and loss graph go on
    the worker device. VGG-16 weights are restored via init_fn before the
    slim training loop starts.
    """
    with tf.Graph().as_default():
        # Force all input processing onto CPU in order to reserve the GPU for the
        # forward inference and back-propagation.
        device = '/cpu:0' if not FLAGS.ps_tasks else '/job:worker/cpu:0'
        with tf.device(
                tf.train.replica_device_setter(FLAGS.ps_tasks,
                                               worker_device=device)):
            inputs, _ = image_utils.imagenet_inputs(FLAGS.batch_size,
                                                    FLAGS.image_size)
            # Load style images and select one at random (for each graph execution, a
            # new random selection occurs)
            style_images, style_labels, \
                style_gram_matrices = image_utils.style_image_inputs(
                    os.path.expanduser(FLAGS.style_dataset_file),
                    batch_size=FLAGS.batch_size,
                    image_size=FLAGS.image_size,
                    square_crop=True,
                    shuffle=True)

        with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
            # Process style and weight flags
            num_styles = FLAGS.num_styles
            if FLAGS.style_coefficients is None:
                style_coefficients = [1.0 for _ in range(num_styles)]
            else:
                style_coefficients = ast.literal_eval(FLAGS.style_coefficients)
            if len(style_coefficients) != num_styles:
                raise ValueError(
                    'number of style coefficients differs from number of styles'
                )
            content_weights = ast.literal_eval(FLAGS.content_weights)
            style_weights = ast.literal_eval(FLAGS.style_weights)

            # Rescale style weights dynamically based on the current style image
            style_coefficient = tf.gather(tf.constant(style_coefficients),
                                          style_labels)
            style_weights = dict((key, style_coefficient * style_weights[key])
                                 for key in style_weights)

            # Define the model
            stylized_inputs = model.transform(inputs,
                                              alpha=FLAGS.alpha,
                                              normalizer_params={
                                                  'labels': style_labels,
                                                  'num_categories': num_styles,
                                                  'center': True,
                                                  'scale': True
                                              })

            # Compute losses.
            total_loss, loss_dict = learning.total_loss(
                inputs, stylized_inputs, style_gram_matrices, content_weights,
                style_weights)
            for key in loss_dict:
                tf.summary.scalar(key, loss_dict[key])

            # Adding Image summaries to the tensorboard.
            tf.summary.image('image/0_inputs', inputs, 3)
            tf.summary.image('image/1_styles', style_images, 3)
            tf.summary.image('image/2_styled_inputs', stylized_inputs, 3)

            # Set up training
            optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
            train_op = slim.learning.create_train_op(
                total_loss,
                optimizer,
                clip_gradient_norm=FLAGS.clip_gradient_norm,
                summarize_gradients=False)

            # Function to restore VGG16 parameters.
            init_fn_vgg = slim.assign_from_checkpoint_fn(
                vgg.checkpoint_file(), slim.get_variables('vgg_16'))

            # Run training
            slim.learning.train(train_op=train_op,
                                logdir=os.path.expanduser(FLAGS.train_dir),
                                master=FLAGS.master,
                                is_chief=FLAGS.task == 0,
                                number_of_steps=FLAGS.train_steps,
                                init_fn=init_fn_vgg,
                                save_summaries_secs=FLAGS.save_summaries_secs,
                                save_interval_secs=FLAGS.save_interval_secs)
# Example #12
# 0
def main(unused_argv=None):
    """Fine-tunes the N-styles network's instance-norm parameters only.

    Restores both VGG-16 and an existing N-styles checkpoint, then trains
    just the InstanceNorm variables of the transformer (the usual recipe
    for adding new styles to a pre-trained model).
    """
    with tf.Graph().as_default():
        # Force all input processing onto CPU in order to reserve the GPU for the
        # forward inference and back-propagation.
        device = '/cpu:0' if not FLAGS.ps_tasks else '/job:worker/cpu:0'
        with tf.device(
                tf.train.replica_device_setter(FLAGS.ps_tasks,
                                               worker_device=device)):
            inputs, _ = image_utils.imagenet_inputs(FLAGS.batch_size,
                                                    FLAGS.image_size)
            # Load style images and select one at random (for each graph execution, a
            # new random selection occurs)
            _, style_labels, style_gram_matrices = image_utils.style_image_inputs(
                os.path.expanduser(FLAGS.style_dataset_file),
                batch_size=FLAGS.batch_size,
                image_size=FLAGS.image_size,
                square_crop=True,
                shuffle=True)

        with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
            # Process style and weight flags
            num_styles = FLAGS.num_styles
            if FLAGS.style_coefficients is None:
                style_coefficients = [1.0 for _ in range(num_styles)]
            else:
                style_coefficients = ast.literal_eval(FLAGS.style_coefficients)
            if len(style_coefficients) != num_styles:
                raise ValueError(
                    'number of style coefficients differs from number of styles'
                )
            content_weights = ast.literal_eval(FLAGS.content_weights)
            style_weights = ast.literal_eval(FLAGS.style_weights)

            # Rescale style weights dynamically based on the current style image
            style_coefficient = tf.gather(tf.constant(style_coefficients),
                                          style_labels)
            style_weights = dict((key, style_coefficient * value)
                                 for key, value in style_weights.items())

            # Define the model
            stylized_inputs = model.transform(inputs,
                                              normalizer_params={
                                                  'labels': style_labels,
                                                  'num_categories': num_styles,
                                                  'center': True,
                                                  'scale': True
                                              })

            # Compute losses.
            total_loss, loss_dict = learning.total_loss(
                inputs, stylized_inputs, style_gram_matrices, content_weights,
                style_weights)
            for key, value in loss_dict.items():
                tf.summary.scalar(key, value)

            # Split transformer variables: only the InstanceNorm parameters
            # are trained; everything else is restored and frozen.
            instance_norm_vars = [
                var for var in slim.get_variables('transformer')
                if 'InstanceNorm' in var.name
            ]
            other_vars = [
                var for var in slim.get_variables('transformer')
                if 'InstanceNorm' not in var.name
            ]

            # Function to restore VGG16 parameters.
            # TODO(iansimon): This is ugly, but assign_from_checkpoint_fn doesn't
            # exist yet.
            saver_vgg = tf.train.Saver(slim.get_variables('vgg_16'))

            def init_fn_vgg(session):
                saver_vgg.restore(session, vgg.checkpoint_file())

            # Function to restore N-styles parameters.
            # TODO(iansimon): This is ugly, but assign_from_checkpoint_fn doesn't
            # exist yet.
            saver_n_styles = tf.train.Saver(other_vars)

            def init_fn_n_styles(session):
                saver_n_styles.restore(session,
                                       os.path.expanduser(FLAGS.checkpoint))

            # Composite init: restore VGG first, then the N-styles weights.
            def init_fn(session):
                init_fn_vgg(session)
                init_fn_n_styles(session)

            # Set up training.
            optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
            train_op = slim.learning.create_train_op(
                total_loss,
                optimizer,
                clip_gradient_norm=FLAGS.clip_gradient_norm,
                variables_to_train=instance_norm_vars,
                summarize_gradients=False)

            # Run training.
            slim.learning.train(train_op=train_op,
                                logdir=os.path.expanduser(FLAGS.train_dir),
                                master=FLAGS.master,
                                is_chief=FLAGS.task == 0,
                                number_of_steps=FLAGS.train_steps,
                                init_fn=init_fn,
                                save_summaries_secs=FLAGS.save_summaries_secs,
                                save_interval_secs=FLAGS.save_interval_secs)
# Example #13
# 0
# Top-level training script fragment. NOTE(review): truncated in this chunk —
# the slim.assign_from_checkpoint_fn(...) call on the last line is cut off.
with tf.Graph().as_default():
    device = '/cpu:0'
    with tf.device(
            tf.train.replica_device_setter(ps_task, worker_device=device)):
        inputs = image_utils.ms_coco_inputs(batch_size, image_size)

    with tf.device(tf.train.replica_device_setter(ps_task)):
        # NOTE(review): this condition looks inverted — it recomputes and
        # re-saves the matrices when the cache file already exists, and tries
        # to load a file that does not exist otherwise. Probably intended:
        # `if not os.path.exists('style_matrices.npz')`.
        if os.path.exists('style_matrices.npz'):
            style_matrices = image_utils.style_input(image_size,
                                                     style_weights.keys())
            np.savez('style_matrices.npz', **style_matrices)
        else:
            style_matrices = np.load('style_matrices.npz')

        stylized_inputs = model.transform(inputs,
                                          normalizer_fn=slim.batch_norm,
                                          normalizer_params={})
        tf.image_summary('stylized', stylized_inputs)

        loss_dict = loss_functions.total_loss(inputs, style_matrices,
                                              stylized_inputs, content_weights,
                                              style_weights, tv_weight)
        # iteritems / tf.image_summary / tf.scalar_summary mark this snippet
        # as Python 2 + pre-TF-1.0.
        for key, value in loss_dict.iteritems():
            tf.scalar_summary(key, value)

        # Set up training
        optimizer = tf.train.AdamOptimizer(learning_rate)
        train_op = slim.learning.create_train_op(loss_dict['total_loss'],
                                                 optimizer)

        init_fn = slim.assign_from_checkpoint_fn(
# Example #14
# 0
def main(_):
    """Evaluation loop for the N-styles model (Python 3 compatible).

    Optionally builds a style-grid image summary and per-style
    learning-curve metrics, then polls FLAGS.train_dir checkpoints via
    slim's evaluation loop.
    """
    with tf.Graph().as_default():
        # Create inputs in [0, 1], as expected by vgg_16.
        inputs, _ = image_utils.imagenet_inputs(FLAGS.batch_size,
                                                FLAGS.image_size)
        evaluation_images = image_utils.load_evaluation_images(
            FLAGS.image_size)

        # Process style and weight flags
        if FLAGS.style_coefficients is None:
            style_coefficients = [1.0 for _ in range(FLAGS.num_styles)]
        else:
            style_coefficients = ast.literal_eval(FLAGS.style_coefficients)
        if len(style_coefficients) != FLAGS.num_styles:
            raise ValueError(
                'number of style coefficients differs from number of styles')
        content_weights = ast.literal_eval(FLAGS.content_weights)
        style_weights = ast.literal_eval(FLAGS.style_weights)

        # Load style images.
        style_images, labels, style_gram_matrices = image_utils.style_image_inputs(
            os.path.expanduser(FLAGS.style_dataset_file),
            batch_size=FLAGS.num_styles,
            image_size=FLAGS.image_size,
            square_crop=True,
            shuffle=False)
        labels = tf.unstack(labels)

        def _create_normalizer_params(style_label):
            """Creates normalizer parameters from a style label."""
            return {
                'labels': tf.expand_dims(style_label, 0),
                'num_categories': FLAGS.num_styles,
                'center': True,
                'scale': True
            }

        # Dummy call to simplify the reuse logic
        model.transform(inputs,
                        reuse=False,
                        normalizer_params=_create_normalizer_params(labels[0]))

        def _style_sweep(inputs):
            """Transfers all styles onto the input one at a time."""
            inputs = tf.expand_dims(inputs, 0)
            stylized_inputs = [
                model.transform(
                    inputs,
                    reuse=True,
                    normalizer_params=_create_normalizer_params(style_label))
                for style_label in labels
            ]
            # Fix: tf.concat_v2 was removed in TF 1.0; tf.concat is the
            # equivalent (see the sibling copy of this function in the file).
            return tf.concat([inputs] + stylized_inputs, 0)

        if FLAGS.style_grid:
            style_row = tf.concat([
                tf.ones([1, FLAGS.image_size, FLAGS.image_size, 3]),
                style_images
            ], 0)
            stylized_training_example = _style_sweep(inputs[0])
            stylized_evaluation_images = [
                _style_sweep(image) for image in tf.unstack(evaluation_images)
            ]
            stylized_noise = _style_sweep(
                tf.random_uniform([FLAGS.image_size, FLAGS.image_size, 3]))
            stylized_style_images = [
                _style_sweep(image) for image in tf.unstack(style_images)
            ]
            if FLAGS.style_crossover:
                grid = tf.concat(
                    [style_row, stylized_training_example, stylized_noise] +
                    stylized_evaluation_images + stylized_style_images, 0)
            else:
                grid = tf.concat(
                    [style_row, stylized_training_example, stylized_noise] +
                    stylized_evaluation_images, 0)
            tf.summary.image(
                'Style Grid',
                tf.cast(
                    image_utils.form_image_grid(grid, ([
                        3 + evaluation_images.get_shape().as_list()[0] +
                        FLAGS.num_styles, 1 + FLAGS.num_styles
                    ] if FLAGS.style_crossover else [
                        3 + evaluation_images.get_shape().as_list()[0], 1 +
                        FLAGS.num_styles
                    ]), [FLAGS.image_size, FLAGS.image_size], 3) * 255.0,
                    tf.uint8))

        if FLAGS.learning_curves:
            metrics = {}
            for i, label in enumerate(labels):
                # Fix: dict.iteritems() does not exist in Python 3; items()
                # is correct on both (and matches the other copy of main).
                gram_matrices = dict([
                    (key, value[i:i + 1])
                    for key, value in style_gram_matrices.items()
                ])
                stylized_inputs = model.transform(
                    inputs,
                    reuse=True,
                    normalizer_params=_create_normalizer_params(label))
                _, loss_dict = learning.total_loss(inputs,
                                                   stylized_inputs,
                                                   gram_matrices,
                                                   content_weights,
                                                   style_weights,
                                                   reuse=i > 0)
                for key, value in loss_dict.items():
                    metrics['{}_style_{}'.format(
                        key, i)] = slim.metrics.streaming_mean(value)

            names_values, names_updates = slim.metrics.aggregate_metric_map(
                metrics)
            for name, value in names_values.items():
                summary_op = tf.summary.scalar(name, value, [])
                print_op = tf.Print(summary_op, [value], name)
                tf.add_to_collection(tf.GraphKeys.SUMMARIES, print_op)
            # Fix: Python 3 dict.values() is a view, not a list; slim's
            # eval_op expects a list of update ops.
            eval_op = list(names_updates.values())
            num_evals = FLAGS.num_evals
        else:
            eval_op = None
            num_evals = 1

        slim.evaluation.evaluation_loop(
            master=FLAGS.master,
            checkpoint_dir=os.path.expanduser(FLAGS.train_dir),
            logdir=os.path.expanduser(FLAGS.eval_dir),
            eval_op=eval_op,
            num_evals=num_evals,
            eval_interval_secs=FLAGS.eval_interval_secs)
def main(_):
  """Evaluation loop for the N-styles model.

  Optionally renders a style-grid image summary and per-style
  learning-curve metrics, then polls FLAGS.train_dir checkpoints via
  slim's evaluation loop.
  """
  with tf.Graph().as_default():
    # Create inputs in [0, 1], as expected by vgg_16.
    inputs, _ = image_utils.imagenet_inputs(
        FLAGS.batch_size, FLAGS.image_size)
    evaluation_images = image_utils.load_evaluation_images(FLAGS.image_size)

    # Process style and weight flags
    if FLAGS.style_coefficients is None:
      style_coefficients = [1.0 for _ in range(FLAGS.num_styles)]
    else:
      style_coefficients = ast.literal_eval(FLAGS.style_coefficients)
    if len(style_coefficients) != FLAGS.num_styles:
      raise ValueError(
          'number of style coefficients differs from number of styles')
    content_weights = ast.literal_eval(FLAGS.content_weights)
    style_weights = ast.literal_eval(FLAGS.style_weights)

    # Load style images.
    style_images, labels, style_gram_matrices = image_utils.style_image_inputs(
        os.path.expanduser(FLAGS.style_dataset_file),
        batch_size=FLAGS.num_styles, image_size=FLAGS.image_size,
        square_crop=True, shuffle=False)
    labels = tf.unstack(labels)

    def _create_normalizer_params(style_label):
      """Creates normalizer parameters from a style label."""
      return {'labels': tf.expand_dims(style_label, 0),
              'num_categories': FLAGS.num_styles,
              'center': True,
              'scale': True}

    # Dummy call to simplify the reuse logic
    model.transform(inputs, reuse=False,
                    normalizer_params=_create_normalizer_params(labels[0]))

    def _style_sweep(inputs):
      """Transfers all styles onto the input one at a time."""
      inputs = tf.expand_dims(inputs, 0)
      stylized_inputs = [
          model.transform(
              inputs,
              reuse=True,
              normalizer_params=_create_normalizer_params(style_label))
          for _, style_label in enumerate(labels)]
      return tf.concat([inputs] + stylized_inputs, 0)

    if FLAGS.style_grid:
      # First grid row: a blank (all-ones) tile followed by the style images.
      style_row = tf.concat(
          [tf.ones([1, FLAGS.image_size, FLAGS.image_size, 3]), style_images],
          0)
      stylized_training_example = _style_sweep(inputs[0])
      stylized_evaluation_images = [
          _style_sweep(image) for image in tf.unstack(evaluation_images)]
      stylized_noise = _style_sweep(
          tf.random_uniform([FLAGS.image_size, FLAGS.image_size, 3]))
      stylized_style_images = [
          _style_sweep(image) for image in tf.unstack(style_images)]
      if FLAGS.style_crossover:
        # Crossover also sweeps each style image through every style.
        grid = tf.concat(
            [style_row, stylized_training_example, stylized_noise] +
            stylized_evaluation_images + stylized_style_images,
            0)
      else:
        grid = tf.concat(
            [style_row, stylized_training_example, stylized_noise] +
            stylized_evaluation_images,
            0)
      if FLAGS.style_crossover:
        grid_shape = [
            3 + evaluation_images.get_shape().as_list()[0] + FLAGS.num_styles,
            1 + FLAGS.num_styles]
      else:
        grid_shape = [
            3 + evaluation_images.get_shape().as_list()[0],
            1 + FLAGS.num_styles]

      # Grid values are in [0, 1]; scale to [0, 255] uint8 for the summary.
      tf.summary.image(
          'Style Grid',
          tf.cast(
              image_utils.form_image_grid(
                  grid,
                  grid_shape,
                  [FLAGS.image_size, FLAGS.image_size],
                  3) * 255.0,
              tf.uint8))

    if FLAGS.learning_curves:
      metrics = {}
      for i, label in enumerate(labels):
        # Slice style i's Gram matrices, keeping the batch dimension.
        gram_matrices = dict(
            (key, value[i: i + 1])
            for key, value in style_gram_matrices.items())
        stylized_inputs = model.transform(
            inputs,
            reuse=True,
            normalizer_params=_create_normalizer_params(label))
        _, loss_dict = learning.total_loss(
            inputs, stylized_inputs, gram_matrices, content_weights,
            style_weights, reuse=i > 0)
        for key, value in loss_dict.items():
          metrics['{}_style_{}'.format(key, i)] = slim.metrics.streaming_mean(
              value)

      names_values, names_updates = slim.metrics.aggregate_metric_map(metrics)
      for name, value in names_values.items():
        summary_op = tf.summary.scalar(name, value, [])
        print_op = tf.Print(summary_op, [value], name)
        tf.add_to_collection(tf.GraphKeys.SUMMARIES, print_op)
      eval_op = names_updates.values()
      num_evals = FLAGS.num_evals
    else:
      eval_op = None
      num_evals = 1

    slim.evaluation.evaluation_loop(
        master=FLAGS.master,
        checkpoint_dir=os.path.expanduser(FLAGS.train_dir),
        logdir=os.path.expanduser(FLAGS.eval_dir),
        eval_op=eval_op,
        num_evals=num_evals,
        eval_interval_secs=FLAGS.eval_interval_secs)
def style_from_camera(checkpoint, num_styles, which_style, SaveVideo=False):
    """Added by Raul Gombru. Computes style transfer frame by frame.

    Captures frames from the default camera, stylizes each one with the
    N-styles model, and shows the input and stylized frames in two windows
    until 'q' is pressed.

    Args:
      checkpoint: Path to the model checkpoint to restore.
      num_styles: Number of styles the checkpoint was trained on.
      which_style: Sequence with the style label(s) to apply.
      SaveVideo: If True, also writes the stylized frames to 'output.avi'.
    """
    # Initialize video camera input.
    cap = cv2.VideoCapture(0)

    if SaveVideo:
        video = cv2.VideoWriter('output.avi', 0, 12.0, (640, 480))

    with tf.Graph().as_default(), tf.Session() as sess:
        # Build the stylization graph ONCE, fed through a placeholder.
        # The original code called model.transform() inside the capture
        # loop, adding new ops to the graph on every frame: the graph grew
        # without bound and each iteration got slower. A placeholder with
        # unknown spatial dims accepts any camera frame size (the model is
        # fully convolutional).
        input_ph = tf.placeholder(tf.float32, shape=[1, None, None, 3])
        stylized_tensor = model.transform(
            input_ph,
            normalizer_params={
                'labels': tf.constant(which_style),
                'num_categories': num_styles,
                'center': True,
                'scale': True}, reuse=tf.AUTO_REUSE)

        # Restore the model weights created by the transform() call above.
        _load_checkpoint(sess, checkpoint)

        while True:
            start_time = time.time()
            # Read frame-by-frame; stop if the camera yields no frame.
            ret, frame = cap.read()
            if not ret:
                break
            original_frame = frame
            # Model expects float inputs in [0, 1] with a batch dimension.
            batch = np.expand_dims(np.float32(frame / 255.0), 0)
            stylized_images = sess.run(stylized_tensor,
                                       feed_dict={input_ph: batch})
            for _, stylized_image in zip(which_style, stylized_images):
                out_frame = stylized_image[None, ...]

            # Back to uint8 [0, 255] for display / encoding.
            out_frame = (out_frame[0, :, :, :] * 255).astype('uint8')

            elapsed_time = time.time() - start_time
            print("Running at --> " + str(1 / elapsed_time) + " fps")
            # Show frames
            cv2.namedWindow("input")
            cv2.imshow('input', original_frame)

            cv2.namedWindow("output")
            cv2.imshow('output', out_frame)

            if SaveVideo:
                video.write(out_frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        # When everything done, release the capture
        cap.release()
        if SaveVideo:
            video.release()
        cv2.destroyAllWindows()
def main(_):
    """Builds an image grid sweeping every style and saves it to matrix.png.

    Constructs the N-styles transformer graph, stylizes a training example,
    a noise image, the evaluation images, and (optionally, with
    --style_crossover) the style images themselves with every style, lays
    the results out in a grid, and writes the grid to 'matrix.png'.
    """
    with tf.Graph().as_default():
        # Create inputs in [0, 1], as expected by vgg_16.
        inputs, _ = image_utils.imagenet_inputs(FLAGS.batch_size,
                                                FLAGS.image_size)
        evaluation_images = image_utils.load_evaluation_images(
            FLAGS.image_size)

        # Load style images. The gram matrices returned by the pipeline are
        # not needed here.
        style_images, labels, _ = image_utils.style_image_inputs(
            os.path.expanduser(FLAGS.style_dataset_file),
            batch_size=FLAGS.num_styles,
            image_size=FLAGS.image_size,
            square_crop=True,
            shuffle=False)
        labels = tf.unstack(labels)

        def _create_normalizer_params(style_label):
            """Creates normalizer parameters from a style label."""
            return {
                'labels': tf.expand_dims(style_label, 0),
                'num_categories': FLAGS.num_styles,
                'center': True,
                'scale': True
            }

        # Dummy call to create the variables once with reuse=False so every
        # later call can use reuse=True.
        model.transform(inputs,
                        alpha=FLAGS.alpha,
                        reuse=False,
                        normalizer_params=_create_normalizer_params(labels[0]))

        def _style_sweep(inputs):
            """Transfers all styles onto the input one at a time."""
            inputs = tf.expand_dims(inputs, 0)
            stylized_inputs = []
            for _, style_label in enumerate(labels):
                stylized_input = model.transform(
                    inputs,
                    alpha=FLAGS.alpha,
                    reuse=True,
                    normalizer_params=_create_normalizer_params(style_label))
                stylized_inputs.append(stylized_input)
            # First column is the unstylized input.
            return tf.concat([inputs] + stylized_inputs, 0)

        # Header row: a white cell followed by one cell per style image.
        style_row = tf.concat([
            tf.ones([1, FLAGS.image_size, FLAGS.image_size, 3]), style_images
        ], 0)
        stylized_training_example = _style_sweep(inputs[0])
        stylized_evaluation_images = [
            _style_sweep(image) for image in tf.unstack(evaluation_images)
        ]
        stylized_noise = _style_sweep(
            tf.random_uniform([FLAGS.image_size, FLAGS.image_size, 3]))
        stylized_style_images = [
            _style_sweep(image) for image in tf.unstack(style_images)
        ]
        if FLAGS.style_crossover:
            grid = tf.concat(
                [style_row, stylized_training_example, stylized_noise] +
                stylized_evaluation_images + stylized_style_images, 0)
        else:
            grid = tf.concat(
                [style_row, stylized_training_example, stylized_noise] +
                stylized_evaluation_images, 0)
        # Grid rows: style row + training example + noise (= 3) plus one row
        # per evaluation image (and per style image with crossover); columns:
        # original input + one per style.
        if FLAGS.style_crossover:
            grid_shape = [
                3 + evaluation_images.get_shape().as_list()[0] +
                FLAGS.num_styles, 1 + FLAGS.num_styles
            ]
        else:
            grid_shape = [
                3 + evaluation_images.get_shape().as_list()[0],
                1 + FLAGS.num_styles
            ]

        style_grid = tf.cast(
            image_utils.form_image_grid(
                grid, grid_shape, [FLAGS.image_size, FLAGS.image_size], 3) *
            255.0, tf.uint8)

        # Evaluate the grid and write it to disk. The context manager
        # guarantees the session is closed (the original code leaked it).
        with tf.Session():
            np_array = tf.squeeze(style_grid).eval()
            im = Image.fromarray(np_array)
            im.save('matrix.png')
def main(unused_argv=None):
  """Trains the instance-norm parameters of the N-styles transformer.

  Restores a pre-trained N-styles checkpoint and VGG-16 weights, then
  fine-tunes only the InstanceNorm variables of the transformer on the
  configured style dataset using slim's training loop.
  """
  with tf.Graph().as_default():
    # Force all input processing onto CPU in order to reserve the GPU for the
    # forward inference and back-propagation.
    device = '/cpu:0' if not FLAGS.ps_tasks else '/job:worker/cpu:0'
    with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks,
                                                  worker_device=device)):
      inputs, _ = image_utils.imagenet_inputs(FLAGS.batch_size,
                                              FLAGS.image_size)
      # Load style images and select one at random (for each graph execution, a
      # new random selection occurs)
      _, style_labels, style_gram_matrices = image_utils.style_image_inputs(
          os.path.expanduser(FLAGS.style_dataset_file),
          batch_size=FLAGS.batch_size, image_size=FLAGS.image_size,
          square_crop=True, shuffle=True)

    with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
      # Process style and weight flags
      num_styles = FLAGS.num_styles
      if FLAGS.style_coefficients is None:
        style_coefficients = [1.0 for _ in range(num_styles)]
      else:
        style_coefficients = ast.literal_eval(FLAGS.style_coefficients)
      if len(style_coefficients) != num_styles:
        raise ValueError(
            'number of style coefficients differs from number of styles')
      content_weights = ast.literal_eval(FLAGS.content_weights)
      style_weights = ast.literal_eval(FLAGS.style_weights)

      # Rescale style weights dynamically based on the current style image.
      # NOTE: .iteritems() was Python-2-only and crashed under Python 3; the
      # rest of this file uses .items().
      style_coefficient = tf.gather(
          tf.constant(style_coefficients), style_labels)
      style_weights = dict([(key, style_coefficient * value)
                            for key, value in style_weights.items()])

      # Define the model
      stylized_inputs = model.transform(
          inputs,
          normalizer_params={
              'labels': style_labels,
              'num_categories': num_styles,
              'center': True,
              'scale': True})

      # Compute losses.
      total_loss, loss_dict = learning.total_loss(
          inputs, stylized_inputs, style_gram_matrices, content_weights,
          style_weights)
      for key, value in loss_dict.items():
        tf.summary.scalar(key, value)

      # Only the conditional instance-norm variables are trained; all other
      # transformer weights stay frozen at their checkpoint values.
      instance_norm_vars = [var for var in slim.get_variables('transformer')
                            if 'InstanceNorm' in var.name]
      other_vars = [var for var in slim.get_variables('transformer')
                    if 'InstanceNorm' not in var.name]

      # Function to restore VGG16 parameters.
      # TODO(iansimon): This is ugly, but assign_from_checkpoint_fn doesn't
      # exist yet.
      saver_vgg = tf.train.Saver(slim.get_variables('vgg_16'))
      def init_fn_vgg(session):
        saver_vgg.restore(session, vgg.checkpoint_file())

      # Function to restore N-styles parameters.
      # TODO(iansimon): This is ugly, but assign_from_checkpoint_fn doesn't
      # exist yet.
      saver_n_styles = tf.train.Saver(other_vars)
      def init_fn_n_styles(session):
        saver_n_styles.restore(session, os.path.expanduser(FLAGS.checkpoint))

      def init_fn(session):
        init_fn_vgg(session)
        init_fn_n_styles(session)

      # Set up training.
      optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
      train_op = slim.learning.create_train_op(
          total_loss, optimizer, clip_gradient_norm=FLAGS.clip_gradient_norm,
          variables_to_train=instance_norm_vars, summarize_gradients=False)

      # Run training.
      slim.learning.train(
          train_op=train_op,
          logdir=os.path.expanduser(FLAGS.train_dir),
          master=FLAGS.master,
          is_chief=FLAGS.task == 0,
          number_of_steps=FLAGS.train_steps,
          init_fn=init_fn,
          save_summaries_secs=FLAGS.save_summaries_secs,
          save_interval_secs=FLAGS.save_interval_secs)