Example #1
0
def multiple_input_images(checkpoint, num_styles, input_images_dir,
                          input_images, which_styles):
    """Added by Raul Gombru. Computes style transfer for a list of images.

    Args:
      checkpoint: path to the trained model checkpoint (file or directory).
      num_styles: number of styles the checkpoint was trained with.
      input_images_dir: directory prefix concatenated with each image name
        (must therefore end with a path separator).
      input_images: list of image file names to stylize.
      which_styles: list of style indices; each input image is stylized once
        per index.

    Returns:
      Dict mapping '<image_basename>_<style_index>' to the stylized image,
      shaped (1, H, W, C).
    """
    result_images = {}

    with tf.Graph().as_default(), tf.Session() as sess:

        # Build the graph ONCE with a feedable placeholder. The original
        # called model.transform() again for every input image, adding new
        # ops to the graph on each loop iteration — memory grows and each
        # iteration gets slower for long image lists.
        image_ph = tf.placeholder(tf.float32, shape=[1, None, None, 3])
        stylized_op = model.transform(
            tf.concat([image_ph for _ in range(len(which_styles))], 0),
            normalizer_params={
                'labels': tf.constant(which_styles),
                'num_categories': num_styles,
                'center': True,
                'scale': True
            },
            reuse=tf.AUTO_REUSE)

        # Load the model
        _load_checkpoint(sess, checkpoint)

        # Stylize images by feeding each one through the fixed graph.
        for image_name in input_images:
            image_path = input_images_dir + image_name
            image = np.expand_dims(
                image_utils.load_np_image(os.path.expanduser(image_path)), 0)
            stylized_images = sess.run(stylized_op,
                                       feed_dict={image_ph: image})
            for which, stylized_image in zip(which_styles, stylized_images):
                result_images[image_name.split('.')[0] + '_' +
                              str(which)] = stylized_image[None, ...]

    return result_images
def main(unused_argv):
  """Serializes style images (and optionally their Gram matrices) to a TFRecord."""
  tf.logging.set_verbosity(tf.logging.INFO)
  style_files = _parse_style_files(os.path.expanduser(FLAGS.style_files))
  with tf.python_io.TFRecordWriter(
      os.path.expanduser(FLAGS.output_file)) as record_writer:
    for label, path in enumerate(style_files):
      tf.logging.info(
          'Processing style file %s: %s' % (label, path))
      feature = {'label': _int64_feature(label)}

      # Encode the raw style image as JPEG bytes.
      image_np = image_utils.load_np_image(path)
      jpeg_buffer = io.BytesIO()
      skimage.io.imsave(jpeg_buffer, image_np, format='JPEG')
      jpeg_buffer.seek(0)
      feature['image_raw'] = _bytes_feature(jpeg_buffer.getvalue())

      if FLAGS.compute_gram_matrices:
        with tf.Graph().as_default():
          # 'pool5' rather than 'fc8': fully-connected layers are already
          # too deep in the network to be useful for style, and they are
          # quite expensive to store.
          end_points = learning.precompute_gram_matrices(
              tf.expand_dims(tf.to_float(image_np), 0),
              final_endpoint='pool5')
          for name, gram in end_points.items():
            feature[name] = _float_feature(gram.flatten().tolist())

      record_writer.write(
          tf.train.Example(
              features=tf.train.Features(feature=feature))
          .SerializeToString())
  tf.logging.info('Output TFRecord file is saved at %s' % os.path.expanduser(
      FLAGS.output_file))
def main(unused_argv):
  """Serializes style images (and optionally their Gram matrices) to a TFRecord.

  Reads the style files listed by FLAGS.style_files, stores each one as a
  labeled JPEG-encoded example in FLAGS.output_file, and, when
  FLAGS.compute_gram_matrices is set, also stores precomputed Gram matrices
  up to 'pool5'.
  """
  tf.logging.set_verbosity(tf.logging.INFO)
  style_files = _parse_style_files(os.path.expanduser(FLAGS.style_files))
  with tf.python_io.TFRecordWriter(
      os.path.expanduser(FLAGS.output_file)) as writer:
    for style_label, style_file in enumerate(style_files):
      tf.logging.info(
          'Processing style file %s: %s' % (style_label, style_file))
      feature = {'label': _int64_feature(style_label)}

      style_image = image_utils.load_np_image(style_file)
      buf = io.BytesIO()
      # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2 — this
      # needs an old SciPy (with Pillow installed), or should be switched
      # to skimage.io.imsave as in the sibling version of this function.
      scipy.misc.imsave(buf, style_image, format='JPEG')
      buf.seek(0)
      feature['image_raw'] = _bytes_feature(buf.getvalue())

      if FLAGS.compute_gram_matrices:
        with tf.Graph().as_default():
          style_end_points = learning.precompute_gram_matrices(
              tf.expand_dims(tf.to_float(style_image), 0),
              # We use 'pool5' instead of 'fc8' because a) fully-connected
              # layers are already too deep in the network to be useful for
              # style and b) they're quite expensive to store.
              final_endpoint='pool5')
          # BUG FIX: dict.iteritems() is Python 2 only and raises
          # AttributeError on Python 3; items() works on both.
          for name, matrix in style_end_points.items():
            feature[name] = _float_feature(matrix.flatten().tolist())

      example = tf.train.Example(features=tf.train.Features(feature=feature))
      writer.write(example.SerializeToString())
  tf.logging.info('Output TFRecord file is saved at %s' % os.path.expanduser(
      FLAGS.output_file))
Example #4
0
def change_style(output):
    """Downloads a Slack-posted image, stylizes it, and uploads the results.

    Args:
      output: Slack event dict; reads output['file']['url_private_download']
        and output['channel'].
    """
    # The Authorization header is required — without it the private-file
    # download does not work.
    res = requests.get(output['file']['url_private_download'],
                       headers={'Authorization': 'Bearer %s' % BOT_API},
                       stream=True)

    # TODO: Now this doesn't use the image on memory.
    # TODO: Explore better resize.
    # NOTE(review): scipy.misc.imread/imresize/imsave were removed in
    # SciPy 1.2/1.3 — this code requires an old SciPy with Pillow; confirm
    # the pinned dependency versions.
    img_arr = misc.imresize(misc.imread(BytesIO(res.content), mode='RGB'),
                            (200, 200))
    misc.imsave('../imgs/input/temp.jpg', img_arr)
    img = np.expand_dims(
        image_utils.load_np_image(
            os.path.expanduser('../imgs/input/temp.jpg')), 0)

    # Fetch the pretrained checkpoints before building the model.
    Style.download_checkpoints('check_points')

    style = Style()
    generated_imgs = style.generate_image(img)

    # Write each stylized image to disk, then upload it to the originating
    # Slack channel.
    for i, generated_img in enumerate(generated_imgs):
        # TODO: Now this doesn't use the image on memory.
        file_name = '../imgs/output/' + 'generated_' + str(i) + '.jpg'
        misc.imsave(file_name, generated_img)

        slack.files.upload(file_name,
                           filename=file_name,
                           channels=output['channel'])
def main(unused_argv=None):
    """Stylizes FLAGS.input_image with every style in FLAGS.which_styles.

    Restores the generator from FLAGS.checkpoint and writes one PNG per
    style index to FLAGS.output_dir.
    """
    # Load image
    image = np.expand_dims(image_utils.load_np_image(FLAGS.input_image), 0)

    which_styles = ast.literal_eval(FLAGS.which_styles)

    with tf.Graph().as_default(), tf.Session() as sess:
        # FIX: tf.concat takes (values, axis) since TF 1.0; the old
        # (axis, values) order used here fails on the TF versions the
        # sibling examples in this file target.
        stylized_images = model.transform(
            tf.concat([image for _ in range(len(which_styles))], 0),
            normalizer_params={
                'labels': tf.constant(which_styles),
                'num_categories': FLAGS.num_styles,
                'center': True,
                'scale': True
            })
        # FIX: tf.all_variables() was renamed tf.global_variables() in TF 1.0.
        model_saver = tf.train.Saver(tf.global_variables())
        checkpoint = FLAGS.checkpoint
        if tf.gfile.IsDirectory(checkpoint):
            checkpoint = tf.train.latest_checkpoint(checkpoint)
            tf.logging.info(
                'loading latest checkpoint file: {}'.format(checkpoint))
        model_saver.restore(sess, checkpoint)

        stylized_images = stylized_images.eval()
        for which, stylized_image in zip(which_styles, stylized_images):
            image_utils.save_np_image(
                stylized_image[None, ...],
                '{}/{}_{}.png'.format(FLAGS.output_dir, FLAGS.output_basename,
                                      which))
def initialize_image(input_image='./images/me.jpg'):
    """Loads an image from disk and caches it under images['latest'].

    Args:
      input_image: path to the image file. Defaults to the previously
        hard-coded path, so existing zero-argument callers are unaffected.
    """
    image = np.expand_dims(
        image_utils.load_np_image(os.path.expanduser(input_image)), 0)

    # Debug output: shape is (1, H, W, C); element type shows the loaded dtype.
    print(image.shape)
    print(type(image[0, 0, 0, 0]))
    images['latest'] = image
def main(unused_argv=None):
  """Stylizes FLAGS.input_image according to FLAGS.which_styles."""
  # Load image
  content = np.expand_dims(image_utils.load_np_image(FLAGS.input_image), 0)

  out_dir = os.path.expanduser(FLAGS.output_dir)
  if not os.path.exists(out_dir):
    os.makedirs(out_dir)

  # A list selects individual styles; a dict maps style index -> weight.
  styles = ast.literal_eval(FLAGS.which_styles)
  if isinstance(styles, list):
    _multiple_images(content, styles, out_dir)
    return
  if isinstance(styles, dict):
    _multiple_styles(content, styles, out_dir)
    return
  raise ValueError('--which_styles must be either a list of style indexes '
                   'or a dictionary mapping style indexes to weights.')
Example #8
0
def main(unused_argv=None):
    """Stylizes FLAGS.input_image according to FLAGS.which_styles."""
    # Load image
    image = np.expand_dims(image_utils.load_np_image(FLAGS.input_image), 0)

    target_dir = os.path.expanduser(FLAGS.output_dir)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    # FLAGS.which_styles is a Python literal: a list picks individual
    # styles, a dict maps style index -> blend weight.
    parsed = ast.literal_eval(FLAGS.which_styles)
    if isinstance(parsed, list):
        _multiple_images(image, parsed, target_dir)
    elif isinstance(parsed, dict):
        _multiple_styles(image, parsed, target_dir)
    else:
        raise ValueError(
            '--which_styles must be either a list of style indexes '
            'or a dictionary mapping style indexes to weights.')
Example #9
0
def main(unused_argv=None):
    """Stylizes an image using style settings passed via HTTP-header env vars.

    Reads Http_X_Style_Name / Http_X_Style_Index / Http_X_Which_Styles from
    the environment, configures FLAGS for the requested pretrained pastiche
    generator, and writes the stylized output images.
    """
    style_name = os.environ.get('Http_X_Style_Name', 'varied')
    style_index = os.environ.get('Http_X_Style_Index', '1')
    which_styles = os.environ.get('Http_X_Which_Styles',
                                  "{%s:1}" % style_index)

    # Per-style model configuration: (num_styles, checkpoint, basename),
    # mirroring the command-line flag sets the pretrained generators ship
    # with (10 Monet styles, 32 varied styles).
    model_configs = {
        'monet': (
            10,
            "/magenta-models/multistyle-pastiche-generator-monet.ckpt",
            "monet_styles"),
        'varied': (
            32,
            "/magenta-models/multistyle-pastiche-generator-varied.ckpt",
            "varied_styles"),
    }
    if style_name not in model_configs:
        raise ValueError(
            'Style %s is not supported. Accepted values are "monet" or "varied"'
            % style_name)

    num_styles, checkpoint, basename = model_configs[style_name]
    FLAGS.num_styles = num_styles
    FLAGS.checkpoint = checkpoint
    # NOTE(review): input_image_name is not defined in this snippet —
    # presumably a module-level global set by the serving wrapper; confirm.
    FLAGS.input_image = input_image_name
    FLAGS.which_styles = which_styles
    FLAGS.output_dir = "out_content.jpg"
    FLAGS.output_basename = basename

    # Load image
    image = np.expand_dims(
        image_utils.load_np_image(os.path.expanduser(FLAGS.input_image)), 0)

    output_dir = os.path.expanduser(FLAGS.output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # A list selects individual styles; a dict maps style index -> weight.
    parsed_styles = ast.literal_eval(FLAGS.which_styles)
    if isinstance(parsed_styles, list):
        _multiple_images(image, parsed_styles, output_dir)
    elif isinstance(parsed_styles, dict):
        _multiple_styles(image, parsed_styles, output_dir)
    else:
        raise ValueError(
            '--which_styles must be either a list of style indexes '
            'or a dictionary mapping style indexes to weights.')