Example 1
def my_form_post():
    # Earlier, commented-out revisions fetched the content image from a URL in
    # the form data (or from a base64-encoded JSON payload) and saved it under
    # images/input/ before transforming it; the current version simply runs
    # the transfer on fixed local files.
    style_transfer('profile.jpg', 'style.jpg')

    # Collect every generated image so the template can display them.
    result_dir = os.path.join(os.getcwd(), "static/css/images")
    urls = os.listdir(result_dir)

    return render_template("show_images.html", urls=urls)
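
The view above is shown without its imports or route decorator. A minimal wiring sketch is given below; the /transfer endpoint and the transfer module providing style_transfer are hypothetical, since the excerpt does not show where the function lives.

import os

from flask import Flask, render_template

from transfer import style_transfer  # hypothetical module; the real home of style_transfer is not shown above

app = Flask(__name__)

@app.route('/transfer', methods=['POST'])
def my_form_post():
    # Run the transfer on the bundled content/style pair, then list the
    # generated files so the template can display them.
    style_transfer('profile.jpg', 'style.jpg')
    result_dir = os.path.join(os.getcwd(), "static/css/images")
    urls = os.listdir(result_dir)
    return render_template("show_images.html", urls=urls)

if __name__ == '__main__':
    app.run(debug=True)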
Example 2
def main():

    # Build the inference graph: one content image in, stylized image out.
    content_placeholder = tf.placeholder(tf.float32, shape=(1, 256, 256, 3))
    generated_images = model.style_transfer(content_placeholder)

    # Cast the generated images to uint8 and JPEG-encode each one.
    output_format = tf.cast(generated_images, tf.uint8)
    jpegs = tf.map_fn(lambda image: tf.image.encode_jpeg(image),
                      output_format,
                      dtype=tf.string)

    with tf.Session() as sess:
        # Restore the most recent checkpoint from model_path.
        checkpoint = tf.train.latest_checkpoint(model_path)
        if not checkpoint:
            print('Could not find trained model in %s' % model_path)
            return
        print('Using model from %s' % checkpoint)
        saver = tf.train.Saver()
        saver.restore(sess, checkpoint)

        # Run the graph on the content image and write the encoded JPEG to disk.
        images_t = sess.run(
            jpegs, feed_dict={content_placeholder: get_image(CONTENT_IMAGE)})
        with open('res.jpg', 'wb') as f:
            f.write(images_t[0])
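
The snippet assumes a get_image helper that returns a batch matching the (1, 256, 256, 3) placeholder; the helper itself is not shown. A minimal sketch, assuming raw 0-255 pixel values and a hypothetical content path, might be:

import numpy as np
from PIL import Image

CONTENT_IMAGE = 'content.jpg'  # hypothetical path; the real constant is defined elsewhere

def get_image(path, size=256):
    # Resize to size x size and return a float32 batch of shape
    # (1, size, size, 3), matching the content placeholder above.
    # Whether the model expects raw 0-255 values or normalized inputs
    # depends on how it was trained.
    img = Image.open(path).convert('RGB').resize((size, size))
    return np.asarray(img, dtype=np.float32)[np.newaxis, ...]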
Example 3
    os.mkdir(flags.summary_dir)


if flags.mode == 'train':
    with tf.device('/gpu:0'):

        # Collect the .jpg files from the two training directories.
        list1_ = os.listdir(flags.train_dir1)
        list1 = [os.path.join(flags.train_dir1, name) for name in list1_
                 if name.endswith('.jpg')]

        list2_ = os.listdir(flags.train_dir2)
        list2 = [os.path.join(flags.train_dir2, name) for name in list2_
                 if name.endswith('.jpg')]

        # Build the input pipelines and the style-transfer network.
        batch1 = imgs_loder(list1, flags)
        batch2 = imgs_loder(list2, flags)

        net = style_transfer(batch1, batch2, flags)

        # Scalar summaries for TensorBoard.
        tf.summary.scalar('discrim_loss', net.discrim_loss)
        tf.summary.scalar('content_loss', net.content_loss)  # assumes the model exposes a content_loss tensor
        tf.summary.scalar('adversarial_loss', net.adversarial_loss)

        tf.summary.scalar('learning_rate', net.learning_rate)

        # One saver for full checkpoints and one restricted to the generator
        # variables, e.g. for initializing the generator from pre-trained weights.
        saver = tf.train.Saver(max_to_keep=10)

        var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)

        var_list2 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')

        weight_initiallizer = tf.train.Saver(var_list2)
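
The excerpt stops before the training loop. Continuing from the variables above, a typical TF1 loop would merge the summaries, start the input queues and checkpoint periodically; net.train_op and flags.max_iter below are assumed names, since the excerpt shows neither the model's train op nor the full flag set.

        merged = tf.summary.merge_all()

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:
            sess.run(tf.global_variables_initializer())
            writer = tf.summary.FileWriter(flags.summary_dir, sess.graph)

            # imgs_loder presumably feeds the graph through input queues.
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            for step in range(flags.max_iter):  # flags.max_iter: assumed flag name
                _, summary = sess.run([net.train_op, merged])  # net.train_op: assumed attribute
                writer.add_summary(summary, step)
                if step % 1000 == 0:
                    saver.save(sess, os.path.join(flags.summary_dir, 'model'),
                               global_step=step)

            coord.request_stop()
            coord.join(threads)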
Example 4
    decoder.to(device)

    # Only convert to tensor; the network accepts arbitrary input sizes.
    T = transforms.Compose([
        transforms.ToTensor(),
    ])

    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')

    # Stylize every content image with every style image.
    for content_path in content_paths:
        for style_path in style_paths:
            logging.info("Transferring style from %s to %s ...",
                         style_path, content_path)

            content = T(Image.open(content_path).convert('RGB'))
            content = content.to(device).unsqueeze(0)

            style = T(Image.open(style_path).convert('RGB'))
            style = style.to(device).unsqueeze(0)

            # Inference only: no gradients needed.
            with torch.no_grad():
                output = model.style_transfer(encoder, decoder, content, style,
                                              args.alpha)
            output = output.cpu()

            # e.g. <output_dir>/<content>_stylized_<style>.jpg
            output_name = '{:s}/{:s}_stylized_{:s}{:s}'.format(
                args.output,
                splitext(basename(content_path))[0],
                splitext(basename(style_path))[0], ".jpg")

            save_image(output, output_name)
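
The loop relies on setup that the excerpt omits: the device, the content/style path lists, and the encoder/decoder weights. The sketch below shows one plausible setup with hypothetical directory names; the save_image call above is presumably torchvision.utils.save_image.

import glob

import torch
from torchvision.utils import save_image  # presumed source of save_image above

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Hypothetical input directories; the real paths would come from args.
content_paths = sorted(glob.glob('input/content/*.jpg'))
style_paths = sorted(glob.glob('input/style/*.jpg'))

# Encoder/decoder construction and checkpoint loading are model specific and
# omitted here; args.alpha (typically in [0, 1]) controls how strongly the
# style is applied.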