Example #1
def upload_file():
    # Flask view: save the uploaded content image, stylize it with the
    # style chosen in the form, and write the result to the upload folder.
    content = request.files['file']
    style = request.form.get('style')
    content.save(os.path.join(app.config['UPLOAD_FOLDER'], 'content.jpg'))
    # Load the content and style images
    content = load_image('./static/image/upload/content.jpg')
    # Resize style to match content, makes code easier
    style = load_image('./static/image/s' + style + '.jpg',
                       shape=content.shape[-2:])
    vgg = model()
    target = stylize(content, style, vgg)
    x = im_convert(target)
    plt.imsave(os.path.join(app.config['UPLOAD_FOLDER'], 'target.png'), x)

    return render_template('success.html')
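Example #1 (and Example #5 further down) leans on `load_image` and `im_convert` helpers that the listing never defines. A minimal PyTorch sketch of what such helpers typically look like in this family of scripts; the normalization constants, maximum size, and exact resize behavior are assumptions, not the original code:

import numpy as np
from PIL import Image
from torchvision import transforms

def load_image(img_path, max_size=400, shape=None):
    # Load an image as a normalized 1xCxHxW tensor; `shape` lets a style
    # image be resized to match the content image (assumed behavior).
    image = Image.open(img_path).convert('RGB')
    size = shape if shape is not None else min(max(image.size), max_size)
    in_transform = transforms.Compose([
        transforms.Resize(size),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    return in_transform(image)[:3, :, :].unsqueeze(0)

def im_convert(tensor):
    # Undo the normalization and return an HxWxC float array in [0, 1],
    # suitable for plt.imsave.
    image = tensor.to('cpu').clone().detach().numpy().squeeze()
    image = image.transpose(1, 2, 0)
    image = image * np.array((0.229, 0.224, 0.225)) + np.array((0.485, 0.456, 0.406))
    return image.clip(0, 1)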
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--image', help="Path to input image")
    parser.add_argument('--style', help="Which style to apply", choices=list(STYLE_IDS.keys()))
    parser.add_argument('--outdir', help="Path to output directory")
    parser.add_argument('--min-image-dim', help="Minimum image dimension", default=1000, type=int)
    parser.add_argument('--num-images', help="Number of images", default=50, type=int)
    parser.add_argument('--shimmer', help="Amount of movement", default=10, type=int)
    args = parser.parse_args()

    print("Loading image")
    img = load_image(args.image, args.min_image_dim)
    print("Downloading model")
    checkpoint_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '%s.ckpt' % args.style)
    download_file_from_google_drive(STYLE_IDS[args.style], checkpoint_path)

    # Two fixed random noise fields; each frame blends between them so the
    # output "shimmers" without changing the underlying content.
    noise1 = args.shimmer * np.random.uniform(size=img.shape) + 177
    noise2 = args.shimmer * np.random.uniform(size=img.shape) + 177

    with imageio.get_writer(os.path.join(args.outdir, 'output.gif'), mode='I') as writer:
        for i in range(args.num_images):
            mult = np.sin(i * 2 * np.pi / args.num_images) / 2 + 0.5
            noise = mult * noise1 + (1 - mult) * noise2
            input_img = img + noise
            print("Transferring style (%d/%d)" % (i+1, args.num_images))
            out = style_transfer(input_img, checkpoint_path)
            writer.append_data(out)
            print("Image saved")
Example #3
def main(
        content_dir=os.getenv("CONTENT_PATH", "content"),
        model_dir=os.getenv("MODEL_PATH", "model"),
        style="mosaic",
        output_dir=os.getenv("OUTPUT_PATH", "output"),
        cuda=True,
):
    """

    :param content_dir:
    :param model_dir:
    :param style: one of candy, mosaic, rain_princess, udnie
    :param output_dir:
    :return:
    """

    # Fail fast if CUDA was requested but is unavailable.
    if cuda and not torch.cuda.is_available():
        print("ERROR: cuda is not available, try running on CPU")
        sys.exit(1)

    filenames = os.listdir(content_dir)
    style_model = load_model(model_dir, style, cuda=cuda)
    for filename in filenames:
        print("Processing {}".format(filename))
        full_path = os.path.join(content_dir, filename)
        content_image = load_image(full_path)
        styled_image = stylize(style_model, content_image)
        output_path = os.path.join(output_dir, filename)
        save_image(output_path, styled_image[0])

Example #4

def main():
    # The top of this example is truncated in the listing; the parser setup
    # and the --raw_segmentation argument are reconstructed from how `args`
    # is used below.
    parser = argparse.ArgumentParser()
    parser.add_argument("--raw_segmentation",
                        type=str,
                        help="Path to the raw segmentation image")
    parser.add_argument("--semantic_thresh",
                        type=float,
                        help="Semantic threshold for label grouping",
                        default=0.5)
    parser.add_argument(
        "--similarity_metric",
        type=str,
        help="Semantic similarity metric for label grouping, default: li",
        default="li")
    similarity_metric_options = ["li", "wpath", "jcn", "lin", "wup", "res"]
    args = parser.parse_args()

    # For more information on the similarity metrics: http://gsi-upm.github.io/sematch/similarity/#word-similarity
    assert (args.similarity_metric in similarity_metric_options)

    # The same file is read twice: once with the project's load_image helper
    # (used later to build the merged output) and once with OpenCV for
    # merge_segments.
    image = load_image(args.raw_segmentation)
    segmentation_image = cv2.imread(args.raw_segmentation)

    segmentation_masks, _ = merge_segments(segmentation_image,
                                           segmentation_image,
                                           args.semantic_thresh,
                                           args.similarity_metric)

    result_dir = 'semantic_merge'
    if not os.path.exists(result_dir):
        os.mkdir(result_dir)
    cv2.imwrite(
        change_filename(result_dir, args.raw_segmentation,
                        '_{}'.format(args.semantic_thresh), '.png'),
        reduce_dict(segmentation_masks, image))
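Example #4 also relies on a `change_filename` helper that is not shown. Judging from the call site (result directory, source path, a threshold suffix, a new extension), a hypothetical reconstruction might be:

import os

def change_filename(result_dir, original_path, suffix, ext):
    # Hypothetical helper: build '<result_dir>/<basename><suffix><ext>',
    # e.g. 'semantic_merge/img_0.5.png', from the original file's name.
    base, _ = os.path.splitext(os.path.basename(original_path))
    return os.path.join(result_dir, base + suffix + ext)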
Example #5

def main(style, content_key, request_id):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    s3 = boto3.resource('s3')
    bucket_name = os.environ.get('IMAGE_BUCKET_NAME')
    bucket = s3.Bucket(bucket_name)

    content_object = bucket.Object(content_key)
    content_tmp = tempfile.NamedTemporaryFile()
    with open(content_tmp.name, 'wb') as f:
        content_object.download_fileobj(f)
    content = load_image(content_tmp.name).to(device)

    style_key = 'style/%s.jpg' % style
    style_object = bucket.Object(style_key)
    style_tmp = tempfile.NamedTemporaryFile()
    with open(style_tmp.name, 'wb') as f:
        style_object.download_fileobj(f)
    # Resize style to match content, makes code easier
    style = load_image(style_tmp.name, shape=content.shape[-2:]).to(device)

    # get the "features" portion of VGG19 (we will not need the "classifier" portion)
    vgg = models.vgg19(pretrained=True).features

    # freeze all VGG parameters since we're only optimizing the target image
    for param in vgg.parameters():
        param.requires_grad_(False)

    vgg.to(device)

    # weights for each style layer
    # weighting earlier layers more will result in *larger* style artifacts
    # notice we are excluding `conv4_2`, our content representation
    style_weights = {
        'conv1_1': 1.,
        'conv2_1': 0.8,
        'conv3_1': 0.5,
        'conv4_1': 0.3,
        'conv5_1': 0.1,
    }

    # you may choose to leave these as is
    content_weight = 1  # alpha
    style_weight = 1e6  # beta

    # iteration hyperparameters
    optimizer = optim.Adam
    steps = 2000  # decide how many iterations to update your image

    # for displaying the target image, intermittently
    show_every = 400

    result = transfer(
        device,
        style,
        content,
        vgg,
        content_weight,
        style_weight,
        style_weights,
        optimizer,
        steps,
    )
    result = Image.fromarray((255 * im_convert(result)).astype(np.uint8))

    file_ext = '.%s' % content_key.split('.')[-1]
    result_tmp = tempfile.NamedTemporaryFile(suffix=file_ext)
    result.save(result_tmp.name)
    result_key = content_key.replace('/input/', '/output/')

    with open(result_tmp.name, 'rb') as result:
        bucket.put_object(
            Key=result_key,
            Body=result,
        )

    sqs = boto3.client('sqs')
    result_message = {
        'requestId': request_id,
        'resultKey': result_key,
    }
    sqs.send_message(
        QueueUrl=os.environ.get('JOB_DONE_QUEUE_URL'),
        MessageBody=json.dumps(result_message),
    )
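Example #5 delegates the actual optimization to `transfer`, whose definition isn't shown. A condensed sketch consistent with the arguments it receives, following the standard Gatys-style gram-matrix formulation; the VGG19 layer-index mapping and the learning rate are assumptions:

import torch

def get_features(image, model):
    # Map VGG19 `features` module indices to the layer names used above.
    layers = {'0': 'conv1_1', '5': 'conv2_1', '10': 'conv3_1',
              '19': 'conv4_1', '21': 'conv4_2', '28': 'conv5_1'}
    features, x = {}, image
    for name, layer in model._modules.items():
        x = layer(x)
        if name in layers:
            features[layers[name]] = x
    return features

def gram_matrix(tensor):
    # Correlations between feature maps of a single image.
    _, d, h, w = tensor.size()
    t = tensor.view(d, h * w)
    return t @ t.t()

def transfer(device, style, content, vgg, content_weight, style_weight,
             style_weights, optimizer_cls, steps):
    content_features = get_features(content, vgg)
    style_features = get_features(style, vgg)
    style_grams = {l: gram_matrix(style_features[l]) for l in style_weights}
    # Start from the content image and optimize its pixels directly.
    target = content.clone().to(device).requires_grad_(True)
    optimizer = optimizer_cls([target], lr=0.003)  # lr is an assumption
    for _ in range(steps):
        target_features = get_features(target, vgg)
        content_loss = torch.mean(
            (target_features['conv4_2'] - content_features['conv4_2']) ** 2)
        style_loss = 0
        for layer, weight in style_weights.items():
            target_gram = gram_matrix(target_features[layer])
            _, d, h, w = target_features[layer].shape
            style_loss += weight * torch.mean(
                (target_gram - style_grams[layer]) ** 2) / (d * h * w)
        total_loss = content_weight * content_loss + style_weight * style_loss
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()
    return target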
Example #6

def postprocess(image):
    # Map the network's [-1, 1] output back to [0, 255] pixel values.
    return (image + 1.0) * 127.5
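The `__main__` block below calls a `preprocess` function that never appears in the listing. Since `postprocess` maps the network's [-1, 1] range back to [0, 255], a plausible definition is simply its inverse (an assumption, not the original code):

def preprocess(image):
    # Assumed inverse of postprocess: map [0, 255] pixels to [-1, 1].
    return image / 127.5 - 1.0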


if __name__ == '__main__':
    from datetime import datetime
    from style_transfer import load_image, save_image, adam_variables_initializer, compute_nima_loss
    import tensorflow as tf

    timestamp = datetime.now().strftime('%Y_%m_%d_%H_%M')

    result_dir = 'result_' + timestamp
    os.mkdir(result_dir)

    content_image = load_image("content.png")
    content_image = preprocess(content_image)

    with tf.Session() as sess:
        transfer_image = tf.Variable(content_image)

        sess.run(tf.global_variables_initializer())

        nima_loss = compute_nima_loss(transfer_image)

        optimizer = tf.train.AdamOptimizer(learning_rate=0.001)

        train_op = optimizer.minimize(nima_loss, var_list=[transfer_image])
        sess.run(adam_variables_initializer(optimizer, [transfer_image]))

        min_loss, best_image = float("inf"), None
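The listing breaks off right after `min_loss` and `best_image` are initialized. A plausible continuation under the obvious reading (track the image with the lowest NIMA loss across iterations, then save it); the iteration count, logging cadence, and `save_image` argument order are all assumptions:

        # Hypothetical continuation of the truncated loop.
        for i in range(1000):
            _, loss = sess.run([train_op, nima_loss])
            if loss < min_loss:
                min_loss = loss
                best_image = sess.run(transfer_image)
            if i % 100 == 0:
                print("iteration %d, nima loss %f" % (i, loss))

        # Assumed signature: save_image(path, image).
        save_image(os.path.join(result_dir, "best.png"), postprocess(best_image))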